==> python_watcher-14.0.0/.coveragerc <==
[run]
branch = True
source = watcher
omit = watcher/tests/*
    watcher/hacking/*

[report]
ignore_errors = True
exclude_lines =
    @abc.abstract
    raise NotImplementedError

==> python_watcher-14.0.0/.mailmap <==
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
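The exclude_lines patterns in the .coveragerc [report] section above keep
abstract-method boilerplate out of the coverage report. A minimal sketch of
the kind of code those two patterns match (a hypothetical module, not a file
from this tree):

    import abc


    class BaseStrategy(abc.ABC):
        @abc.abstractmethod  # line matched by the '@abc.abstract' pattern
        def execute(self):
            # Matched by the 'raise NotImplementedError' pattern, so this
            # line is never counted as missed coverage.
            raise NotImplementedError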
==> python_watcher-14.0.0/.pre-commit-config.yaml <==
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      # whitespace
      - id: trailing-whitespace
      - id: mixed-line-ending
        args: ['--fix', 'lf']
        exclude: '.*\.(svg)$'
      - id: check-byte-order-marker
      # file format and permissions
      - id: check-ast
      - id: debug-statements
      - id: check-json
        files: .*\.json$
      - id: check-yaml
        files: .*\.(yaml|yml)$
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      # git
      - id: check-added-large-files
      - id: check-case-conflict
      - id: detect-private-key
      - id: check-merge-conflict
  - repo: https://github.com/Lucas-C/pre-commit-hooks
    rev: v1.5.5
    hooks:
      - id: remove-tabs
        exclude: '.*\.(svg)$'
  - repo: https://opendev.org/openstack/hacking
    rev: 7.0.0
    hooks:
      - id: hacking
        additional_dependencies: []
        exclude: '^(doc|releasenotes|tools)/.*$'
  - repo: https://github.com/PyCQA/bandit
    rev: 1.7.6
    hooks:
      - id: bandit
        args: ['-x', 'tests', '-s', 'B101,B311,B320']
  - repo: https://github.com/hhatto/autopep8
    rev: v2.3.1
    hooks:
      - id: autopep8
        files: '^.*\.py$'
  - repo: https://github.com/codespell-project/codespell
    rev: v2.3.0
    hooks:
      - id: codespell
        args: ['--ignore-words=doc/dictionary.txt']
  - repo: https://github.com/sphinx-contrib/sphinx-lint
    rev: v1.0.0
    hooks:
      - id: sphinx-lint
        args: [--enable=default-role]
        files: ^doc/|releasenotes|api-guide
        types: [rst]
  - repo: https://github.com/PyCQA/doc8
    rev: v1.1.2
    hooks:
      - id: doc8
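The bandit hook above scans the tree but excludes the tests directory
('-x tests') and skips checks B101 (use of assert) and B311 (standard
pseudo-random generators), along with B320 (an XML-parsing check from older
bandit releases). A hypothetical sketch of code that passes the hook only
because of those skips:

    import random


    def pick_canary_node(nodes):
        # Permitted because B311 is skipped; this randomness is not used
        # for anything security-sensitive.
        return random.choice(nodes)


    def apply_plan(actions):
        # Permitted because B101 (assert used) is skipped.
        assert actions, "expected a non-empty action plan"
        for action in actions:
            print(action)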
==> python_watcher-14.0.0/.stestr.conf <==
[DEFAULT]
test_path=./watcher/tests
top_dir=./
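With test_path pointing at ./watcher/tests, stestr discovers any
unittest-compatible test module under that directory. A minimal sketch of a
test it would pick up (hypothetical file watcher/tests/test_example.py; the
real suite builds on the project's own test base classes):

    import unittest


    class TestExample(unittest.TestCase):
        # Discovered because the module lives under watcher/tests and
        # follows the default test_*.py naming convention.
        def test_addition(self):
            self.assertEqual(4, 2 + 2)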
==> python_watcher-14.0.0/.zuul.yaml <==
- project:
    queue: watcher
    templates:
      - check-requirements
      - openstack-cover-jobs
      - openstack-python3-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - watcher-tempest-functional
        - watcher-tempest-functional-jammy
        - watcher-grenade
        - watcher-tempest-strategies
        - watcher-tempest-actuator
        - watcherclient-tempest-functional
        - watcher-tempest-functional-ipv6-only
        - watcher-prometheus-integration
    gate:
      jobs:
        - watcher-tempest-functional
        - watcher-tempest-functional-jammy
        - watcher-tempest-functional-ipv6-only

- job:
    name: watcher-tempest-actuator
    parent: watcher-tempest-multinode
    vars:
      tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_actuator

- job:
    name: watcher-tempest-strategies
    parent: watcher-tempest-multinode
    vars:
      tempest_concurrency: 1
      tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies

- job:
    name: watcher-tempest-multinode
    parent: watcher-tempest-functional
    nodeset: openstack-two-node-noble
    roles:
      - zuul: openstack/tempest
    group-vars:
      subnode:
        devstack_local_conf:
          post-config:
            $WATCHER_CONF:
              watcher_cluster_data_model_collectors.compute:
                period: 120
              watcher_cluster_data_model_collectors.baremetal:
                period: 120
              watcher_cluster_data_model_collectors.storage:
                period: 120
        devstack_services:
          watcher-api: false
          watcher-decision-engine: true
          watcher-applier: false
          c-bak: false
          ceilometer: false
          ceilometer-acompute: false
          ceilometer-acentral: false
          ceilometer-anotification: false
          watcher: false
          gnocchi-api: false
          gnocchi-metricd: false
          rabbit: false
          mysql: false
    vars:
      devstack_local_conf:
        post-config:
          $WATCHER_CONF:
            watcher_cluster_data_model_collectors.compute:
              period: 120
            watcher_cluster_data_model_collectors.baremetal:
              period: 120
            watcher_cluster_data_model_collectors.storage:
              period: 120
        test-config:
          $TEMPEST_CONFIG:
            compute:
              min_compute_nodes: 2
              min_microversion: 2.56
            compute-feature-enabled:
              live_migration: true
              block_migration_for_live_migration: true
            placement:
              min_microversion: 1.29
      devstack_plugins:
        ceilometer: https://opendev.org/openstack/ceilometer

- job:
    name: watcher-tempest-functional
    parent: devstack-tempest
    timeout: 7200
    required-projects: &base_required_projects
      - openstack/ceilometer
      - openstack/python-openstackclient
      - openstack/python-watcherclient
      - openstack/watcher
      - openstack/watcher-tempest-plugin
      - openstack/tempest
    vars: &base_vars
      devstack_plugins:
        watcher: https://opendev.org/openstack/watcher
      devstack_services:
        watcher-api: true
        watcher-decision-engine: true
        watcher-applier: true
        tempest: true
        s-account: false
        s-container: false
        s-object: false
        s-proxy: false
      tempest_plugins:
        - watcher-tempest-plugin
      tempest_test_regex: watcher_tempest_plugin.tests.api
      tox_envlist: all
      zuul_copy_output:
        /etc/hosts: logs

# TODO(gmann): As per the 2025.1 testing runtime, we need to run at least
# one job on jammy. This job can be removed in the next cycle (2025.2).
- job:
    name: watcher-tempest-functional-jammy
    description: This is an integrated job testing on Ubuntu jammy (22.04)
    parent: watcher-tempest-functional
    nodeset: openstack-single-node-jammy
    vars:
      <<: *base_vars
      python_version: '3.9'

- job:
    name: watcher-tempest-functional-ipv6-only
    parent: devstack-tempest-ipv6
    description: |
      Watcher devstack tempest tests job for IPv6-only deployment
    required-projects: *base_required_projects
    vars: *base_vars

- job:
    name: watcher-grenade
    parent: grenade
    required-projects:
      - openstack/watcher
      - openstack/python-watcherclient
      - openstack/watcher-tempest-plugin
    vars: *base_vars
    irrelevant-files: &irrelevant_files
      - ^(test-|)requirements.txt$
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^watcher/hacking/.*$
      - ^watcher/tests/.*$
      - ^releasenotes/.*$
      - ^setup.cfg$
      - ^tools/.*$
      - ^tox.ini$

- job:
    # This job is used in the python-watcherclient repo
    name: watcherclient-tempest-functional
    parent: watcher-tempest-functional
    timeout: 4200
    vars:
      tempest_concurrency: 1
      tempest_test_regex: watcher_tempest_plugin.tests.client_functional

- job:
    name: watcher-sg-core-tempest-base
    parent: devstack-tempest
    nodeset: openstack-two-node-noble
    description: |
      This job is for testing watcher and sg-core/prometheus installation
    abstract: true
    pre-run:
      - playbooks/generate_prometheus_config.yml
    irrelevant-files: *irrelevant_files
    timeout: 7800
    required-projects: &base_sg_required_projects
      - openstack/aodh
      - openstack/ceilometer
      - openstack/tempest
      - openstack-k8s-operators/sg-core
      - openstack/watcher
      - openstack/python-watcherclient
      - openstack/watcher-tempest-plugin
      - openstack/devstack-plugin-prometheus
    vars:
      configure_swap_size: 8192
      devstack_plugins:
        ceilometer: https://opendev.org/openstack/ceilometer
        aodh: https://opendev.org/openstack/aodh
        sg-core: https://github.com/openstack-k8s-operators/sg-core
        watcher: https://opendev.org/openstack/watcher
        devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
      devstack_services:
        watcher-api: true
        watcher-decision-engine: true
        watcher-applier: true
        tempest: true
        # We do not need Swift in this job so disable it for speed
        # Swift services
        s-account: false
        s-container: false
        s-object: false
        s-proxy: false
        # Prometheus related services
        prometheus: true
        node_exporter: true
      devstack_localrc:
        CEILOMETER_BACKENDS: "sg-core"
        CEILOMETER_PIPELINE_INTERVAL: 15
        CEILOMETER_ALARM_THRESHOLD: 6000000000
        NODE_EXPORTER_ENABLE: false
        PROMETHEUS_ENABLE: false
        PROMETHEUS_SERVICE_SCRAPE_TARGETS: "sg-core,node-exporter"
        PROMETHEUS_CONFIG_FILE: "/home/zuul/prometheus.yml"
      devstack_local_conf:
        post-config:
          $WATCHER_CONF:
            watcher_datasources:
              datasources: prometheus
            prometheus_client:
              host: 127.0.0.1
              port: 9090
            watcher_cluster_data_model_collectors.compute:
              period: 120
            watcher_cluster_data_model_collectors.baremetal:
              period: 120
            watcher_cluster_data_model_collectors.storage:
              period: 120
        test-config:
          $TEMPEST_CONFIG:
            compute:
              min_compute_nodes: 2
              min_microversion: 2.56
            compute-feature-enabled:
              live_migration: true
              block_migration_for_live_migration: true
            placement:
              min_microversion: 1.29
            service_available:
              sg_core: True
            telemetry_services:
              metric_backends: prometheus
            telemetry:
              disable_ssl_certificate_validation: True
              ceilometer_polling_interval: 15
            optimize:
              datasource: prometheus
      tempest_plugins:
        - watcher-tempest-plugin
      tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies
      tempest_concurrency: 1
      tox_envlist: all
      zuul_copy_output:
        /etc/prometheus/prometheus.yml: logs
    group-vars:
      subnode:
        devstack_plugins:
          ceilometer: https://opendev.org/openstack/ceilometer
          sg-core: https://github.com/openstack-k8s-operators/sg-core
          devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
        devstack_services:
          ceilometer-acompute: true
          sg-core: false
          prometheus: false
          node_exporter: true
        devstack_localrc:
          CEILOMETER_BACKEND: "none"
          CEILOMETER_BACKENDS: "none"
          # sg_core related var
          NODE_EXPORTER_ENABLE: false
          PROMETHEUS_ENABLE: false
        devstack_local_conf:
          post-config:
            $WATCHER_CONF:
              watcher_cluster_data_model_collectors.compute:
                period: 120
              watcher_cluster_data_model_collectors.baremetal:
                period: 120
              watcher_cluster_data_model_collectors.storage:
                period: 120

- job:
    name: watcher-prometheus-integration
    parent: watcher-sg-core-tempest-base
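Each job above narrows its tempest run with tempest_test_regex, which is
matched against fully qualified test IDs. A rough Python illustration of
that selection (the test IDs below are hypothetical, for illustration only):

    import re

    # Regex taken from the watcher-tempest-strategies job above.
    regex = re.compile(
        r"watcher_tempest_plugin.tests.scenario.test_execute_strategies")

    test_ids = [
        "watcher_tempest_plugin.tests.api.test_audit.TestAudit.test_list",
        "watcher_tempest_plugin.tests.scenario.test_execute_strategies"
        ".TestExecuteStrategies.test_dummy",
    ]

    # Only the scenario test ID matches and would be selected for the run.
    selected = [t for t in test_ids if regex.search(t)]
    print(selected)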
==> python_watcher-14.0.0/AUTHORS <==
98k <18552437190@163.com>
Akihito INOH
Alexander Chadin
Alexander Chadin
Alexandr Stavitskiy
Alfredo Moralejo
Amy Fong
Andrea Frittoli
Andreas Jaeger
Andreas Jaeger
Antoine Cabot
Anton Khaldin
Atul Pandey
Bin Zhou
Biswajeeban Mishra
Bruno Grazioli
BubaVV
Béla Vancsics
Cao Xuan Hoang
Chandan Kumar (raukadah)
ChangBo Guo(gcb)
Chaozhe.Chen
Chris MacNaughton
Chris Spencer
Clark Boylan
Daniel Pawlik
Dantali0n
Dao Cong Tien
Darren Shaw
David TARDIVEL
Doug Hellmann
Douglas Viroel
Douglas Viroel
Drew Thorstensen
Edwin Zhai
Egor Panfilov
Erik Olof Gunnar Andersson
Fanis Kalimullin
Feng Shengqin
Flavio Percoco
ForestLee
Ghanshyam Mann
Guang Yee
Gábor Antal
Ha Van Tu
Hervé Beraud
Hidekazu Nakamura
Hoang Trung Hieu
Ian Wienand
Iswarya_Vakati
Jaewoo Park
James E. Blair
James Page
Jean-Emile DARTOIS
Jeremy Liu
Jiri Podivin
Joe Cropper
Ken'ichi Ohmichi
Kevin_Zheng
Kien Nguyen
Lance Bragstad
Larry Rensing
LiXiangyu
Lin Yang
Lucian Petrut
Luigi Toscano
Luong Anh Tuan
M V P Nitesh
Margarita Shakhova
Martin Kopec
Matt Riedemann
Michael Gugino
Michelle Mandel
Muzammil Mueen
Ngo Quoc Cuong
Nguyen Hai
Nguyen Hai Truong
Nguyen Hung Phuong
Nishant Kumar
OpenStack Release Bot
Palimariu Marius
Pradeep Kumar Singh
Prashanth Hari
Prudhvi Rao Shedimbi
Q.hongtao
Ralf Rantzau
Ronelle Landy
Sampath Priyankara
Santhosh Fernandes
Sean McGinnis
Sean Mooney
ShangXiao
Steve Kowalik
Steve Wilkerson
Sumit Jamgade
Susanne Balle
Swapnil Kulkarni (coolsvap)
Takashi Kajinami
Takashi Kajinami
Takashi Natsume
Tatiana Kholkina
Taylor Peoples
Thierry Carrez
Tin Lam
Tobias Urdin
Tobias Urdin
Tomasz Kaczynski
Tomasz Trębski
Viacheslav Samarin
Viktor Varga
Vincent Françoise
Vladimir Ostroverkhov
Vu Cong Tuan
XiaojueGuan
XieYingYun
Yaguo Zhou
Yatin Kumbhare
Yosef Hoffman
Yumeng Bao
YumengBao
Yumeng_Bao
Zhenyu Zheng
Zhenzan Zhou
aditi
aditi
akhiljain23
akihito-inoh
avnish
baiwenteng
caoyuan
chao liu
chenaidong1
chengebj5238
chenghuiyu
chenke
chenker
chenming
chenxing
cima
deepak_mourya
digambar
ericxiett
gaofei
gaozx
gecong1973
gengchc2
ghanshyam
haris tanvir
howardlee
inspurericzhang
iswarya_vakati
jacky06
jaugustine
jeremy.zhang
jinquanni
junjie huang
licanwei
limin0801
lingyongxu
liushuobj
liyanhang
lvxianguo
m
maaoyu
melissaml
mergalievibragim
pangliye
pengyuesheng
qinchunhua
qiufossen
rajat29
ricolin
ricolin
root
sai
shangxiaobj
sharat.sharma
shubhendu
songwenping
sue
sunjia
suzhengwei
suzhengwei
the.bling
ting.wang
unknown
vmahe
wangjiaqi07
wangqi
wangxiyuan
wangzihao
watanabe isao
weiweigu
wu.chunyang
wu.chunyang
xiaoxue
xuanyandong
yanxubin
yuhui_inspur
zhang.lei
zhangbailin
zhangdebo
zhangguoqing
zhangjianfeng
zhangyanxian
zhangyanxian
zhengwei6082
zhoulinhui
zhufl
zhulingjie
zhurong
zhuzeyu
zte-hanrong
鲍昱蒙00205026 <00205026@zte.intra>

==> python_watcher-14.0.0/CONTRIBUTING.rst <==
If you would like to contribute to the development of OpenStack, you must
follow the steps on this page:

   https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow documented
at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/watcher
"cpu\_util" metric * Avoid performing retries in case of missing resources * Improve vm\_consolidation logging * vm workload consolidation: allow cold migrations * Unblock the CI gate * Update master for stable/2023.2 11.0.0 ------ * Add timeout option for Grafana request * Imported Translations from Zanata * Fix watcher comment * Imported Translations from Zanata * Update master for stable/2023.1 10.0.0 ------ * update saving\_energy docs * Modify saving\_energy log info * Fix passenv in tox.ini * Use new get\_rpc\_client API from oslo.messaging * Move queue declaration to project level * Fix compatibility with oslo.db 12.1.0 * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed 9.0.0.0rc1 ---------- * Imported Translations from Zanata * remove unicode from code * Tests: fix requirements for unit tests * Watcher DB upgrde compatibility consideration for add\_apscheduler\_jobs * Add Python3 zed unit tests * Update master for stable/yoga 8.0.0 ----- * Remove ceilometerclient dependecy * Add Python3 yoga unit tests * Update master for stable/xena 7.0.0 ----- * BugFix: Prevent float type variables from being passed to random * Use Block Storage API v3 instead of API v2 * use HTTPStatus instead of direct code * Add watcher dashboard to devstack documentation * Changed minversion in tox to 3.18.0 * Manage constraints with testenv install\_command * Use py3 as the default runtime for tox * Enable tls-proxy as default in test jobs * setup.cfg: Replace dashes with underscores * Replace deprecated with\_lockmode with with\_for\_update * Add Python3 xena unit tests * Update master for stable/wallaby * requirements: Drop os-testr 6.0.0 ----- * [goal] Deprecate the JSON formatted policy file * Fix gate requirement checks job * incorrect name in unit test * Use common rpc pattern for all services * Drop lower-constraints * remove bandit B322 check * Fix missing self argument in instances\_no\_attached * Fix parameter passed to IronicNodeNotFound exception * Imported Translations from Zanata * Remove the unused coding style modules * Remove usage of six * Bump py37 to py38 in tox.ini * Remove six * Add Python3 wallaby unit tests * Update master for stable/victoria * [goal] Migrate testing to ubuntu focal 5.0.0 ----- * Use importlib to take place of im module * Implements base method for time series metrics * Native Zuul v3 watcher-grenade job + some cleanup * option to rollback action\_plan when it fails * resize action don't support revert * Watcher API supports strategy name when creating audit template * Revert "Don't revert Migrate action" * remove mox3 * voting watcher-grenade * Check if scope is None * Use uwsgi binary from path and mark grenade non-voting * Use unittest.mock instead of mock * Cap jsonschema 3.2.0 as the minimal version * Compatible with old scope format * Use unittest.mock instead of third party mock * Switch to newer openstackdocstheme and reno versions * Fix requirements check * Remove translation sections from setup.cfg * hacking: force explicit import of python's mock * Use unittest.mock instead of third party mock * Fix hacking min version to 3.0.1 * Monkey patch original current\_thread \_active * Add py38 package metadata * Remove future imports * Remove Babel requirement * Imported Translations from Zanata * Add Python3 victoria unit tests * Update master for stable/ussuri * Remove six[8] remove requirement&low-requirement * Remove six[7] 4.0.0.0rc1 ---------- * convert EfficacyIndicator.value to float type * Remove six[6] * Remove 
six[5] * Remove six[4] * Remove six[3] * Remove six[2] * Remove six[1] * update description about audit argument interval * remove wsmeext.sphinxext * Cleanup py27 support * Block Sphinx 3.0.0 * Update hacking for Python3 * Removed py27 in testing doc * Add procname for uwsgi based service watcher-api * just set necessary config options * simplify doc directory * Add config option enable\_webhooks\_auth * api-ref: Add webhook API reference * Doc: Add EVENT audit description * Community Goal: Project PTL & Contrib Docs Update * Add api version history * releasenotes: Fix reference url * Add releasenote for event-driven-optimization-based * doc: move Concurrency doc to admin guide * doc for event type audit * Move install doc to user guide * Update user guide doc * Add webhook api * Fix duplicated words issue like "an active instance instance" * Add audit type: event * Add list datamodel microversion to api-ref * Add a new microversion for data model API * Releasenote for decision engine threadpool * Use enum class define microversions * Start README.rst with a better title * Change self.node to self.nodes in model\_root * Documentation on concurrency for contributors * replace host\_url with application\_url * Migrate grenade jobs to py3 * [ussuri][goal] Drop python 2.7 support and testing * Refactoring the codes about getting used and free resources * Use threadpool when building compute data model * General purpose threadpool for decision engine * tox: Keeping going with docs * Switch to Ussuri jobs * Don't throw exception when missing metrics * Remove print() * Update master for stable/train 3.0.0.0rc1 ---------- * Fix damodel list return None error When has a compute node * Fix misspelling * skip deleted instance when creating datamodel * Fix unit test failed * Watcher planner slector releasenote * Set strategy planner * Get planner from solution * Build pdf docs * update test about cinderclient v1 * correct watcher project for oslo\_config * Add node resource consolidation planner * Watcher Planner Selector * Add releasenote about bp show-datamodel-api * node resource consolidation * Fix misspell word * Remove redundant word 'strategy' * Add node\_resource\_consolidation doc * Add watcher-specs link to readme.rst * Add get node used and free resources * Implement watcher datamodel list in watcher-api * Implement watcher datamodel list in watcher-decision-engine * Add api-ref doc for data model api * add audit parameter to do\_execute * improve strategies tempest * add placement min\_microversion * set compute min\_microversion * [train][goal] Define new 'watcher-tempest-functional-ipv6-only' job * Remove unused disk\_capacity field * Don't revert Migrate action * update workload\_balance strategy * update node resource capacity for basic\_consolidation * update host\_maintenance strategy * update noisy\_neighbor strategy * update outlet\_temp\_control strategy * add releasenote for bp improve-compute-data-model * update vm\_workload\_consolidation strategy * Remove resource used fields from ComputeNode * Fix var src\_extra\_specs error * Remove stale comment in method execute() * Add resource capacity property * Getting data from placement when updating datamodel * replace disk\_capacity by disk * set disk field to disk capacity * Check resource class before using * remove id field from CDM * Update api-ref location * Improve Compute Data Model * Add call\_retry for ModelBuilder for error recovery * Remove useless gconfig process in watcher/api/scheduling.py * Optimize method 
list\_opts() in watcher/conf/opts.py * Fix watcher/conf/applier.py default worker value * Remove useless \_opts.py * Baseclass for ModelBuilder with audit scope * Move datasources folder into decision\_engine * Add reource\_name for save\_energy in action input parameter field * Add get\_compute\_node\_by\_uuid * Resolve aggregate error in workload\_stabilization * Remove redundant human\_id fields when creating and updating datamodel * Replace human\_id with name in grafana doc * Add marker option for get\_instance\_list() * remove baremetal nodes when building CDM * Add reource\_name for zone\_migration in action input parameter field * Grafana proxy datasource to retrieve metrics * Add reource\_name in action input parameter field * Add get\_node\_by\_name * Reduce the query time of the instances when call get\_instance\_list() * remove baremetal nodes from hypversior list * Remove notifier\_driver option in Watcher devstack * Improve logging in building of nova data model * Releasenote for grafana datasource * improve OptGroup consistency across configuration * Blacklist sphinx 2.1.0 (autodoc bug) * Add Python 3 Train unit tests * Fix invalid assert states * Add name field for test data * Add uWSGI support * Add name for instance in Watcher datamodel * Documentation configuring grafana datasource * Configure nova notification\_format for grenade * Fix placement\_client group help docs generation * Improve the configuration parameters for grafana * Configure nova notification format in non-grenade CI jobs * improve the process of instance\_created.end * remove tail\_log * Update strategy doc * Implement the configuration for Grafana datasource * Fix missing print format * typo ceilometer url * Replace removed exceptions and prevent regression * Define a new InstanceNotMapped exception * Move datasource query\_retry into baseclass * Fix base enable\_plugin branch for grenade run * Remove dead code * Map instance to its node * update contraints url * Backwards compatibility for node parameter * Fix property access in test\_global\_preference\* tests * Add Placement helper * Cleanup ConfFixture * Fix string formatting * check instance state for instance.update * add strategy tempest job * Remove apidoc * Optimize NovaHelper.get\_compute\_node\_by\_hostname * Optimize hypervisor API calls * Add missing ws separator between words * Group instance methods together in nova\_helper * Audit API supports new force option * Optimize NovaClusterDataModelCollector.add\_instance\_node * Fix test\_metric\_file\_override metric from backend * Add force field to Audit * Remove 2.56 version compatibility check * Require nova\_client.api\_version >= 2.56 * Improve exceptions and logging in ds manager * Improve DevStack documentation to support metrics * formal datasource interface implementation * Improve Gnocchi and Monasca datasource tests * Allow using file to override metric map * support-keystoneclient-option * Fix typo in ceilometer datasource * Handle no nova CDM in notification code * Remove unused utilities file * Update migration notification * Remove bandit from lower-constraints * Update Sphinx requirement * Fix Stein version in watcher-status docs * Add doc/requirements.txt to venv tox target * Remove dead code from NovaClusterDataModelCollector * Enhance the collector\_plugins option help text * Use base\_strategy's add\_action\_migrate method * Fix\_inappropriate\_name * update api version history * allow building docs without ceilometer client * pass default\_config\_dirs variable for 
config initialization * docs: fix link to install guide from user guide * Remove watcher.openstack.common=WARN from \_DEFAULT\_LOG\_LEVELS * Add force field to api-ref * Fix API version header * Remove unused exceptions * Fix bandit runs with 1.6.0 * Allow for global datasources preference from config * Use the common logging setup function in devstack runs * Fix reraising of exceptions * Using node replace resource\_id in method add\_action\_disable\_node() * Put the method add\_migration() in base.py * update wsme types * Add tempest voting * Resolve problems with audit scope and add tests * Replace git.openstack.org with opendev.org * Add hardware.cpu\_util in workload\_stabilization * Drop use of git.openstack.org * OpenDev Migration Patch * separate launching audit scheduler * Replace HOST\_IP to SERVICE\_HOST * remove py35 * Uncap jsonschema * Fix docs gate failed * Adapt Watcher to Python3.7 * Move eventlet monkey patch code * Fix lower-constraint deps handling * Fix openstack-tox-lower-constraint TIMED\_OUT Error * Update meeting schedule to new bi-weekly format * Make datasource methods match names of metrics * Replace openstack.org git:// URLs with https:// * Imported Translations from Zanata * Update master for stable/stein 2.0.0 ----- * Move client function test to watcher-tempest-plugin * Access to action's uuid by key * Migrate legacy jobs to Ubuntu Bionic * releasenote for data model scope * Fix unittest failed * Remove unused type check 'int' in audit.py * Generalize exceptions & structure of strategies * scope for datamodel * Fix inappropriate description about the audit\_state\_machine.png * improve \_collect\_aggregates * Provide two arguments to exception's message * make ceilometer client import optional * Fix uniform airflow strategy config parameter * Fix outlet\_temp\_control config parameter * remove config parameter 'datasource' * Add the define of vm\_workload\_consolidation job * change config parameter from 'datasource' to 'datasources' * Move datasources metric mappings out of base.py * function get\_sd return 0 early if len(hosts) is 0 * Update storage\_balance job * Add storage balance job * Update user guide * Fix E731 error * trivial * [Trivial fix] Do not use self in classmethod * Add grenade job * Update hacking version * Add version api ref * update api-ref for audit start/end time * Use template for lower-constraints * Remove unused modules * Add host maintenance tempest * Fix mailing list archive URL * Fix stop\_watcher function * Deprecate Ceilometer Datasource * Fix doc about nova notifications * Remove hostname-related playbooks * audit create request can't set scope * Change openstack-dev to openstack-discuss channel * Increase the unit test coverage of host\_maintenance.py * update doc for install ubuntu * Fix spelling error in the comments of file host\_maintenance.py * Add audit scoper for baremetal data model * Increase the unit test coverage of vm\_workload\_consolidation.py * Fix audit\_template\_uuid description * start tls-proxy (if enabled) before checking for api * remove older api doc * Add missing ws separator between words * Update doc for vm\_workload\_consolidation strategy * Increase the unit test coverage of cinder\_helper.py * Increase the unit test coverage of nova\_helper.py * Fix version header in the response * Enhance Watcher Applier Engine * Remove unsuitable brackets * Imported Translations from Zanata * To avoid ambiguity for flavor\_id * Fix accessing to optional cinder pool attributes * Add cover job * Remove redundant 
docstring * optimize get\_instances\_by\_node * Adjust the same format as above * Make watcherclient-tempest-functional test non-voting * Add detailed unit test documentation for watcher * Update min tox version to 2.0 * Fix parameter type for cinder pool * update datamodel by nova notifications * API Microversioning * remove set\_host\_offline * Watcher doesn't need paramiko * Don't need nova notifications * Fix oslo\_versionedobjects warnings * Add framework for watcher-status upgrade check * Update documentation regarding DataSource for strategies * Use limit -1 for nova servers list * tenant\_id should be project\_id in instance element * add start and end time for continuous audit * Remove uses of rpc\_backend (oslo\_config) * Don't quote {posargs} in tox.ini * Do not pass www\_authenticate\_uri to RequestContext * remove nova legacy notifications * Fix link to Watcher API * Fix audit creation with named goal and strategy * Provide region name while initialize clients * Add efficacy indicators for workload\_stabilization strategy * ignore .testrepository * Fix wrong audit scope * add python 3.6 unit test job * switch documentation job to new PTI * Follow the new PTI for building docs * Imported Translations from Zanata * Remove warning log in common.context * Remove hosts if can't find hosts in host agrregate * Fix goal method in policy * import zuul job settings from project-config * Remove -u root as mysql is executed with root user * Improve logs of Workload Stabilization strategy * Imported Translations from Zanata * Add hostname to API Reference * Update reno for stable/rocky 1.12.0 ------ * Fix TypeError in LOG.debug * fix unit test:test\_execute\_audit\_with\_interval\_no\_job * improve strategy doc * remove get\_flavor\_instance * Fix unittest MismatchError * only check decision engine service * remove extra'\_' and space * remove voluptuous * Update watcher-db-manage help doc * Fix strategies with additional time to initialize CDM * Add apscheduler\_jobs table to models * Fix AttributeError exception 1.11.0 ------ * Rescheduling continuous audits from FAILED nodes * Add HA support * Add noisy neighbor strategy doc * Fix service task interval * Add noisy neighbor description * remove LOG definitions that have not been used * trivial: fix strategy name * update Ubuntu version from 14.04 to 16.04 * Update host\_maintenance doc * Check job before removing it * update monascaclient version * Sync CDM among Decision Engines by using notification pool * Add actionplan list detail api ref * Remove help message about ZeroMQ driver * Switch to stestr * Remove non-voting jobs from gate queue * Remove undefined job * Triggers the api-ref-jobs to publish wather api reference * Fix unit test error * Use jsonschema to validate efficacy indicators * fix the rule name * Correcting url in action\_plan policy 1.10.0 ------ * fix tox python3 overrides * replace windows line endings with unix line endings * Restore requirements versions * Switch to oslo\_messaging.ConfFixture.transport\_url * Add API Reference for Watcher * Amend the spelling error of a word * add doc for host\_maintenance * Update pypi url to new url * Update storage CDM collector * Replace port 35357 with 5000 for test\_clients.py * Add Cinder Cluster Data Model Collector test case * add strategy host\_maintenance * Trivial: update url to new url * Fix to reuse RabbitMQ connection * Refactor watcher API for Action Plan Start * Update auth\_url in install docs * Updated tests on bug, when get list returns deleted items * Fix the 
openstack endpoint create failed * Update the default value for nova api\_verison * Moved do\_execute method to AuditHandler class * Fix typo in StorageCapacityBalance * Grouped \_add\_\*\_filters methods together * Replace of private \_create methods in tests * Exclude Project By Audit Scope * add strategy doc:storage capacity balance * Update requirements 1.9.0 ----- * add unittest for execute\_audit in audit/continuous.py * amend delete action policy * Replace cold migration to use Nova migration API * Add release notes link to README * Trivial fix of saving\_energy strategy doc * Update auth\_uri option to www\_authenticate\_uri * Added \_get\_model\_list base method for all get\_\*\_list methods * Trivial fix of user guide doc * zuulv3 optimization * Enable mutable config in Watcher * Several fixes of strategies docs * set one worker for watcherclient-tempest-functional job * Remove obsolete playbooks of legacy jobs * Updated from global requirements * add lower-constraints job * Replaced deprecated oslo\_messaging\_rabbit section * ZuulV3 jobs * Delete the unnecessary '-' * Fix sort of \*list command output * Remove version/date from CLI documentation * Adding driver to mysql connection URL * Updated from global requirements * ignore useless WARNING log message * Updated from global requirements * Add the missing markups for the hyperlink titles * Change the outdated links to the latest links in README * basic\_cons fix * Revert "Update OpenStack Installation Tutorial to Rocky" * Add parameter aggregation\_method for basic\_consolidation * Imported Translations from Zanata * Delete the unnecessary '-' * Update OpenStack Installation Tutorial to Rocky * Add parameter aggregation\_method for work\_stab * basic\_consolidation trivial fix * Fix Uuid and virtual\_free elements load error * Fix exception string format * Imported Translations from Zanata * Add the missing title of Configuration Guide * Fix change\_nova\_service\_state action * Updated Hacking doc * [Trivialfix]Modify a grammatical error * Fix old url links in doc * Add a hacking rule for string interpolation at logging * Complete schema of workload\_stabilization strategy * filter exclude instances during migration * Fix grammar errors * workload\_stabilization trivial fix * Updated from global requirements * Imported Translations from Zanata * Add support for networkx v2.0 * Updated from global requirements * Fix some dead link in docs * Update meeting time on odd weeks * fix misspelling of 'return' * Add missing release notes * Imported Translations from Zanata * Update reno for stable/queens 1.8.0 ----- * Zuul: Remove project name * Fix issues with aggregate and granularity attributes * Repalce Chinese double quotes to English double quotes * Fix get\_compute\_node\_by\_hostname in nova\_helper * Add zone migration strategy document * Updated from global requirements * Fixed AttributeError in storage\_model * Update zone\_migration comment * Zuul: Remove project name * Updated from global requirements * [Doc] Add actuator strategy doc * Imported Translations from Zanata * Remove redundant import alias * Fix strategy state * Add datasources to strategies 1.7.0 ----- * Add baremetal strategy validation * Strategy requirements * Add zone migration strategy * Fix workload\_stabilization unavailable nodes and instances * Update unreachable link * Updated from global requirements * Fix compute api ref link * Adapt workload\_balance strategy to multiple datasource backend * Adapt noisy\_neighbor strategy to multiple datasource 
backend * Adapt basic\_consolidation strategy to multiple datasource backend * check audit name length * Audit scoper for storage CDM * Imported Translations from Zanata * Update link address * Fix tempest devstack error * Add storage capacity balance Strategy * Updated from global requirements * Adapt workload\_stabilization strategy to new datasource backend * Updated from global requirements * Update pike install supermark to queens * Add the title of API Guide * Fix compute scope test bug * Add baremetal data model * Set apscheduler logs to WARN level * Fix configuration doc link * update audit API description * update action API description * use current weighted sd as min\_sd when starting to simulate migrations * correct audit parameter typo * Updated from global requirements * Fix watcher audit list command * check actionplan state when deleting actionplan * TrivialFix: remove redundant import alias * check audit state when deleting audit * reset job interval when audit was updated * Updated from global requirements * Fix releasenotes build * Update getting scoped storage CDM * Updated from global requirements * Fix 'unable to exclude instance' * Register default policies in code * listen to 'compute.instance.rebuild.end' event 1.6.0 ----- * Updated from global requirements * bug fix remove volume migration type 'cold' * Add and identify excluded instances in compute CDM * Server with PAUSE status can also live-migrate * Fix migrate action with migration\_type 'cold' * Updated from global requirements * Add Datasource Abstraction * Make gnocchi as default datasource * Updated from global requirements * Fix Bug Unable to get scoped data model * listen to 'compute.instance.resize.confirm.end' event * Notifications Changes Multiple Global Efficacy * 'get\_volume\_type\_by\_backendname' returns a list * Add app.wsgi to target of pep8 * [Doc] Fix ubuntu version in devstack installation * Remove setting of version/release from releasenotes * Updated from global requirements * Updated from global requirements * Migrate to Zuul v3 * Fix test runner config issues with os-testr 1.0.0 * Multiple global efficacy * Do not use “-y” for package install * check task\_state in the live\_migrate\_instance * Change HTTP to HTTPS * Updated from global requirements * Can't cancell CONTINUOUS audit * add name for audit, update audit notifications * Update doc and add release note about cdm-scoping * Update the useful links for Rally job * update API ref doc for ScoringEngine * Fix the strategy path of outlet\_temp\_control.py * Optimize the link address * Imported Translations from Zanata * Fix a typo * Unify the oslo\_log import usage * Optimise indentation for db client * Correct the schema format * add name for audit, changes for watcher api/db 1.5.0 ----- * Update the nova api\_version default value to 2.53 * Correct the instance migration link * Optimize check\_migrated in cinder\_helper.py * Optimize live\_migrate\_instance * Updated from global requirements * Add saving energy strategy description * Add documentation about saving energy strategy * Invoke version\_string in watcher/version.py directly * Fix \_build\_instance\_node for building Compute CDM * writing convention: do not use “-y” for package install * Update OpenStack Installation Tutorial to pike * Remove explicitly enable neutron * Fix the telemetry-measurements hyperlink for strategies * optimize update\_audit\_state * Optimize the import format by pep8 * Remove the unnecessary word * Fix TypeError in function chunkify * Fix action 
plan state change when action failed * Remove installation guide for openSUSE and SLES * Notification Cancel Action Plan * Fix migrate action failure * Add exception log when migrate action failed * Add cdm-scoping * [Doc] Fix host option * Use Property setters * Update the description for controller node * Updated from global requirements * cleanup test-requirements * Update the "IAAS" to "IaaS" * Correct the link for watcher cli * Update the documentation link for doc migration * extend-node-status * Updated from global requirements * Fix Watcher DB schema creation * Fix Action 'change\_node\_power\_state' FAILED * Updated from global requirements * Fix incorrect config section name of configure doc * Fix Gate Failure * Remove redundant right parenthesis * Utils: fix usage of strtime * Update the documentation link for doc migration * iso8601.is8601.Utc No Longer Exists * Remove the unused rootwrap config * Remove unused efficacy indicators * Replace DbMigrationError with DBMigrationError * Replace default gnocchi endpoint type * Fix gnocchiclient creation * Fix DEFAULT\_SCHEMA to validate host\_aggreates * Updated from global requirements * Modify display\_name in strategy documentation * [Trivialfix]Fix typos in watcher * Restrict existing strategies to their default scope * Update default Nova API version to 2.53(Pike) * Fix to use . to source script files * Fix to use "." to source script files * Update the documentation link for doc migration * Updated from global requirements * Updated from global requirements * Remove unnecessary dict.keys() method calls (api) * Update the documention for doc migration * Remove watcher\_tempest\_plugin * Updated from global requirements * Fix KeyError exception * Remove pbr warnerrors * Adjust the action state judgment logic * Update reno for stable/pike 1.4.0 ----- * workload balance base on cpu or ram util * [Doc] Fix db creation * get\_config\_opts method was overwritten * Replace map/filter lambda with comprehensions * change ram util metric * Fix failure to load storage plugin * Fix exception.ComputeNodeNotFound * Updated from global requirements * Change exception class from monascaclient * Fix gnocchi repository URL in local.conf.controller * Fix ironic client input parameter * Fix show db version in README * Removed unnecessary setUp calls in tests * Fix compute CDM to include disabled compute node * Update State diagram of Action Plan * Modification of statistic\_aggregation method * Fix incorrect action status in notifications * Added Actuator Strategy * [Doc] Update software version * Fix continuous audit fails once it fails * Updated from global requirements * Fix Hardcoded availability zone in nova-helper * Saving Energy Strategy 1.3.0 ----- * Fix gate-watcher-python27-ubuntu-xenial FAILURE * dynamic action description * [Doc] Add cinder to architecture diagram * Add release notes for Pike * [Doc] Add Configure Cinder Notifications * Update the documention for doc migration * Remove all sphinx warnings * Update the documention for doc migration * Replace voluptuous with JSONSchema in BaseAction * Update URLs in documents according to document migration * Updated from global requirements * Remove testenv for install-guide * Add volume migrate action * Fix devstack plugin * Enable migration to rely on nova-scheduler * Update default ironic endpoint type * Updated from global requirements * remove useless logging * New cron type for audit interval * Fix dbmanage upgrade and downgrade * Update weekly meetings time in docs * Add title to 
administrator guide * Abort operation for live migration * [Doc] Add gnocchi to system architecture diagram * Ignore autogenerated sample config file * bug fix: Can't get sample through CeilometerHelper * Replace voluptuous with JSONSchema to validate change\_node\_power\_state * move doc/source/webapi content to doc/source/api * Cinder model integration * Update Documentation link in README * Adapt watcher documentation for new standards * Replace default neutron endpoint type * switch to openstackdocs theme * Replace default glance endpoint type * Fix test\_list\_with\_limit failed * Replace the usage of 'manager' with 'os\_primary' * Updated from global requirements * avoid repeated actions in the solution * Update .gitignore * Pass environment variables of proxy to tox * Enable some off-by-default checks * Updated from global requirements * Fix get\_action\_plan\_list filter error * node.status for vm\_workload\_consolidation * Noisy Neighbor Strategy * Updated from global requirements * fix Keyerror in test\_nova\_cdmc * Add action for compute node power on/off * Replace voluptuous with JSONSchema to validate migration action * Updated from global requirements * Replace voluptuous with JSONSchema to validate change\_nova\_service\_state * Replace voluptuous with JSONSchema to validate resize action * Replace voluptuous with JSONSchema to validate sleep action * Replace voluptuous with JSONSchema to validate nop action * Remove log translations and hacking * Remove deprecated oslo\_messaging.get\_transport 1.2.0 ----- * Cancel Action Plan * fix multinode tempest test failure * Updated from global requirements * Add rm to whitelist\_externals in tox.ini * Remove usage of parameter enforce\_type * Replace default cinder endpoint type * Add action description * Watcher official install-guide * Trivial fix typos * Replace oslo\_utils.timeutils.isotime * Updated from global requirements * Deleted audit record still get by 'audit list'cmd * Versioned Notifications for service object * fix clod\_migrate problem * Change cinder api\_version to '3' in default * Updated from global requirements * doc error for WeightPlanner * Remove the deprecated tempest.test.attr * Replace assertRaisesRegexp with assertRaisesRegex * Updated from global requirements * [bugfix]for division use accurate division * Fix a typo * Updated from global requirements * Add Watcher JobStore for background jobs * Updated from global requirements * Add host\_aggregates in exclude rule of audit scope * replace nova endpoint * Add 'rm -f .testrepository/times.dbm' command in testenv * [Doc] fix local.conf.compute * [bugfix]retry is reached but action still success * use instance data replace exception.NoDataFound * Set access\_policy for messaging's dispatcher * Fix devstack plugin * [Doc] messaging -> messagingv2 * Add ironicclient 1.1.0 ----- * Updated from global requirements * Added suspended audit state * Add gnocchi support in uniform\_airflow strategy * Add Apache License Content in index.rst * Optimize the link address * correct syntax error * Updated from global requirements * exception when running 'watcher actionplan start XXX' * Optimize the link address * Add gnocchi support in outlet\_temp\_control strategy * fixed syntax error in json * Replace py34 with py35 * Add gnocchi support in workload\_balance strategy * Add gnocchi plugin support for devstack * Updated from global requirements * Run Watcher-API behind mod-wsgi * oslo messaging notifications driver update * Use tox to generate a sample configuration 
file * Added tempest test for workload\_stabilization * Add gnocchi support in VM-Workload-Consolidation strategy * Updated from global requirements * Fix for remove verbose option * Use HostAddressOpt for opts that accept IP and hostnames * Add gnocchi support in workload\_stabilization strategy * Prevent the migration of VM with 'optimize' False in VM metadata * Add period input parameter to vm workload consolidation and outlet temp control strategy * Add endpoint\_type option for openstack clients * Updated from global requirements * Add gnocchi support in basic\_consolidation strategy * Imported Translations from Zanata * Remove log translations * Add Gnocchi datasource * exception when running 'watcher service list' * Remove old oslo.messaging transport aliases * stale the action plan * Local copy of scenario test base class * set eager=True for actionplan.list * Use https instead of http * Updated from global requirements * Reduced the code complexity * Updated from global requirements * Updated from global requirements * Updated from global requirements * Adding instance metadata into cluster data model * Add Apache License content in conf.py file * [Fix gate]Update test requirement * Remove unused PNG files in image\_src directory * Updated from global requirements * Fix no endpoints of ceilometer in devstack environment setup * Fix some typos in vm\_workload\_consolidation.py * Optimize audit process * Reactivate watcher dashboard plugin in devstack/local.conf.controller * Add SUPERSEDED description * Add Action Notification * Switch to use test\_utils.call\_until\_true * Adding additional details to notification logs * Add checking audit state * Fix the mapping between the instance and the node * Remove support for py34 * Fix that remove 'strategy' attribute does not work * Fix spelling error in NotificationEndpoint classes * Fix log level error to warning * Fix incorrect auto trigger flag * Using items() instead of six.iteritems() * Update reno for stable/ocata 1.0.0 ----- * Added action\_plan.execution.\* actions * Added action\_plan.create|update|delete notifs * Add release note for action plan notifications * Add first alembic version for db migration * Use RPC cast() to be asynchronous * Updated graph model to use attr\_dict * Fix context error for user * Idiomatic loop for calculate\_num\_migrations * Fix multinode tempest test failure 0.34.0 ------ * add Ocata release notes * Add period input parameter to basic strategy * Fix invalid mock on ceilometerclient * Documentation update * Updated from global requirements * New Applier Workflow Engine * Remove obsolete Resource element * Graph cluster model instead of mapping one * Fix building of model with a scoped exclusion rule * Fix broken gates because of wrong pip command * Fix test\_clients\_monasca failure * Updated from global requirements * New default planner * Modify the field in tox.ini * Add action plan SUPERSEDED state * Fix dummy strategy to use input parameters * Updated from global requirements * Update Server Consolidation global efficacy * Fix a typo in watcher/objects/base.py * resolve KeyError exception * Enable notification for vm task state update * Should use glanceclient to get images * Multi datasource support for Basic Consolidation * Added Monasca Helper * Removed unnecessary utf-8 encoding * Updated from global requirements * Enable coverage report in console output * Fix TypeError if no input\_parameters added * Update configuration document * Add additional depencencies of CentOS 7 * Fix reference 
http * remove incorrect inline comment * Add auto\_trigger support to watcher * Fix variable name error * Updated from global requirements * Updated from global requirements * Fix bad CDMC update on reception of service.update 0.33.0 ------ * Implemented clients and auth config module * Implemented wacther decision engine config module * Documentation for Uniform Airflow Migration Strategy Fixed issues * Implemented applier config module * Implemented planner config module * Implemented db config module * Implemented exception config module * Implemented paths config module * remove unused log * Repair log parameter error * multinode devstack update for live-migration * Function call pass parameter error * Documentation for Workload Balance Migration Strategy Fixed comments and added the doc primitive call * Specific exception for stale cluster state was added * Implemented utils config module * Fix CI failures * improve statistic\_aggregation * Unnecessary exception * update strategy table when parameters\_spec changes * Implemented api config module * Updated from global requirements * Updated from global requirements * Improve the instruction of vm\_workload\_consolidation * Fix method name in doc/source/dev/plugin/action-plugin.rst * Repairing unit test failures * Fix some incorrect description in doc * [Doc] Fix example code of goal plugin * Use uuidutils instead of uuid.uuid4() * Modify the variable assignment errors * Show team and repo badges on README * Fix 'ImportError' when docbuild * Fix one ref that does not work * Updated from global requirements * Add periods input parameter * Solve some spelling mistakes * Remove redundan lines * Documentation for Outlet Temperature Based Strategy Fixed outstanding comments * Change hardware.cpu\_util in workload\_stabilization * Fix inconsistent descriptions in docstring in action\_plan.py * Removed nullable flag from audit\_id in ActionPlan * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Fixed update of WatcherObject fields on update * Fix some typos in action.py & action\_plan.py & audit.py * [Doc] Fix default value in workload\_stabilization * Fix the wrong ref for 'Compute node' 0.32.0 ------ * Implemented base + moved plugins & service conf * Add audit.planner events * Add audit.strategy events * Implemented audit.delete notification * Implemented audit.create notification * Implemented audit.update notification * Update devstack plugin to add notification param * Added notification\_level config option * Removed status\_topic config parameter * Remove stale notification code * Added notifications documentation page * Added support for versioned notifications * Add doc for vm\_workload\_consolidation strategy * Fix rally gate test * Updated from global requirements * Add doc for workload-stabilization spec * Fix the typo in efficacy\_indicator module * Fix NoMetricValuesForInstance error * Use oslo\_log instead of logging * optimized 'find\_instance()' * Fix workload stabilization strategy to ignore disabled hosts * Added Tempest API tests for /scoring\_engines * Remove unused SUBMITTED audit state * Added action\_plan ObjectField for Action * Added audit & strategy ObjectField for ActionPlan * Added goal & strategy ObjectField for Audit * Added goal+strategy ObjectField for AuditTemplate * Added 'goal' ObjectField for Strategy object * Refactored Watcher objects to use OVO * Removed deadline, version, extra & host\_aggregate * Eager loading on One-to-X foreign keys * [Doc] Fix strategy list optional argument * Use Enum value 
instead of String Value Fixing Gating Issue * Change "Openstack" to "OpenStack" * Avoid use xx=[] for parameter to initialize it's value * Transform KB into MB and normalize CPU * Updated from global requirements * Delete python bytecode file * Add strategy template doc * Drop MANIFEST.in - it's not needed by pbr * Add Audit Scope Handler * Add service object to the watcher\_db\_schema * Add service supervisor * Updated from global requirements * Added Model base class + related doc * Add RECOMMENDED state * Enable release notes translation * Added composite unique name constraints * Added missing test on GMR plugin * Moved Watcher doc plugin outside main package * Stop adding ServiceAvailable group option * HasLength() rewritten to assertEqual() * Updated from global requirements * Fix typo in docstring * Remove duplicate unittest * Fix typo in hooks.py * Docstrings should not start with a space * Fix capital letter in doc * Doc updates * Watcher utils cleanup * Fixed GMR configuration issue * Add constraint target to tox.ini and remove 1 dep * Updated from global requirements * 'tox -e py27' failed * Fix typo in docstring from "interprete" to "interpret" * Fix a typo in watcher.po * Deactivate dashboard plugin until fixed * remove redundant word * Fix a typo in basic\_consolidation.py * Update Watcher description * Test code tidy up * Update reno for stable/newton * Fixed issue on compute nodes iteration * Refactored Tests to load scenarios from file * Updated from global requirements * Remove group\_by statement in metric queries 0.30.0 ------ * Add rally-jobs folder to get rally support * Log CDM structure before+after executing strategy * Fixed Tempest test due to notification issues * Use memory mode for sqlite in db test * Added tests on API hooks and related context * When action plan is empty, its state is incorrect * Use parameters instead of config for workload stabilization * Add documentation for Scoring Module * Implemented GMR plugin to show CDM structures * Fix incorrect strings and formatting * Modify use of assertTrue(A in B) * Fixed indentation * The default value of 'is\_admin\_project' 0.29.0 ------ * Add release notes for Newton blueprints * TrivialFix: Remove cfg import unused * TrivialFix: Remove logging import unused * Remove unused LOG * Update configuration section for notifications * Doc on how to add notification endpoints * Notification and CDM partial update * Remove unreachable line * Added start/end date params on ceilometer queries * Correct watcher reraising of exception * Check unspecified parameters create audit * Fix loading of plugin configuration parameters * Add Scoring Module implementation * Add unit tests for continuous.py * Updated from global requirements * Fixed flaky tempest test * Remove pot files * Updated from global requirements * Added strategy ID + Action Plan syncing * Fixes to get cluster data model * Updated from global requirements * Fix double self.\_goal definition * Scheduler of decision\_engine fix * Updated from global requirements * Clean imports in code * Modify libvirt\_opts="-d -l" to libvirtd\_opts="-d -l" * Rename (pre/post)condition to (pre/post)\_condition * Add unit tests for nova\_helper.py * Updated from global requirements * Removed unused function in uniform airflow * Update the home-page info with the developer documentation * Updated from global requirements * Refactored the compute model and its elements * Use more specific asserts * Merged metrics\_engine package into decision\_engine * Updated DE architecture 
doc + 'period' param * Added DE Background Scheduler w/ model sync jobs * Cluster data model collector plugin documentation * Loadable Cluster Data Model Collectors * Updated from global requirements * Add scoring engines to database and API layers * Implement goal\_id, strategy\_id and host\_aggregate into Audit api * use parameters to set the threshold * Updated from global requirements * Fixed Basic optim tempest test * Fix 2 occurrences of typo: "occured" --> "occurred" * Add hacking checks to watcher * Update docs links to docs.openstack.org * Remove discover from test-requirements * Updated from global requirements * Fix typos and messages in strategies * Remove unused columns parameters in watcher/db/api * test\_context\_hook\_before\_method failed * Fix dict.keys() PY3 compatible 0.28.0 ------ * There are some spelling errors in the code * Add Python 3.5 classifier and venv * Update unitaty tests to verify strategy and goal name * Bad goal and strategy name for Airflow Optimization * Fix unittest in test\_api.py * Optimize local.conf.controller file to enable watche-dashboard * Add continuously optimization * Add Desktop Service Store to .gitignore file * Documentation for strategy parameters * Updated from global requirements * Add installation from Debian packages section * Add new documentation section for Watcher policies rules * Update executor to eventlet * Add policies for API access control to watcher project * Fix watcher doc build error * Fix field type to audit\_type * Remove duplicate unittest * Fix link error in base-setup.rst * Fix failing Tempest tests * Enable strategy parameters * Update Docs links to docs.openstack.org * add dependency for 3rd-party plugins * Make default Planner generic to handle new action * Modify IRC weekly meeting time * Uniform Airflow migration strategy implementation * Updated from global requirements * Centralize plugin loaders in watcher/applier * Add importing modules instead of classes * Centralize plugin loaders in decision engine * Add goal\_name field in strategy * Updated from global requirements * Use disabled/enabled to change service state * Check if nova-service is already disabled * Add bandit in tox -e pep8 * Added filter operators * Fix StrategyContext to use the strategy\_id in the Audit Template * Use proper theme for release notes * Fix releasenotes generation * Documentation on goal and efficacy * Added efficacy indicators to /action\_plans * Added pre/post execution methods to strategies * Added EfficacyIndicator object * Added efficacy specification to /goals * Add reno for release notes management * Added EfficacyIndicator model in DB * Decoupled Goal from Strategy * Fix broken link in doc * Added missing config section for autogeneration * Updated from global requirements * Added audit\_template filter to /audits/detail * Add fix for hardware.cpu.util meter in sd-strategy * Add fix for \_\_init\_\_() error * Updated tempest test creds retrieval mechanism 0.27.0 ------ * Documentation for plugins-parameters * Workload balance migration strategy implementation * Watcher plugins table in Guru meditation reports * Enabled config parameters to plugins * Add Overload standard deviation strategy * Add goal name as filter for strategy list cmd * Update Watcher documentation * Updated from global requirements * Added cold VM migration support * Add goal\_name & strategy\_name in /audit\_templates * Replace assertEqual(None, \*) with assertIsNone in tests * Fix lazy translation issue with watcher-db-manage * Fixed flaky 
tempest test * Removed telemetry tag from tempest tests * Updated from global requirements * Fix for statistic\_aggregation * Remove direct access to dbapi * Updated from global requirements * Fix documentation watcher sql database * Watcher DB class diagram * Added .pot file * Remove [watcher\_goals] config section * Remove watcher\_goals section from devstack plugin * Documentation update for get-goal-from-strategy * Updated purge to now include goals and strategies * Syncer now syncs stale audit templates * Add strategy\_id & goal\_id fields in audit template * Refactored Strategy selector to select from DB * Added /strategies endpoint in Watcher API * Add Goal in BaseStrategy + Goal API reads from DB * DB sync for Strategies * Added Strategy model * Added Goal object + goal syncing * Added Goal model into Watcher DB * Log "https" if using SSL * [nova\_helper] get keypair name by every admin users * Remove using of UUID field in POST methods of Watcher API * Refactored DE and Applier to use oslo.service * Refactored Watcher API service * Updated from global requirements * Removed unused 'alarm' field * Add parameters verification when Audit is being created * correct the available disk, memory calculating Source data are misused in outlet temperature strategy. This patch fixes it * Upgrade Watcher Tempest tests for multinode * Update .coveragerc to ignore abstract methods * Updated from global requirements * Fix for deleting audit template * Remove unused logging import and LOG global var * Updated from global requirements 0.26.0 ------ * Added missing support for resource states in unicode format in VM workload consolidation strategy * Disabled PATCH, POST and DELETE for /actions * Added information on plugin mechanism to glossary * Invalid states for Action Plan in the glossary * Integrated consolidation strategy with watcher * Added oslo.context to requirements.txt 0.25.0 ------ * Remove the watcher sample configuration file * Updated action-plugin doc to refer to Voluptuous * Rename variable vm\_avg\_cpu\_util * renamed "efficiency" with "efficacy" Closes-Bug:#1558468 * Remove true/false return from action.execute() * Updated from global requirements * Documentation on purge command * Added purge script for soft deleted objects * Added Mixin-related filters on DB queries * Updated from global requirements * Refactored check for invalid goal * Renamed api.py to base.py in metrics engine * Re-generated the watcher.pot * Added Disk Capacity in cluster-data-model * Removing unicode from README.rst 0.24.0 ------ * Doc on how to set up a thirdparty project * Remove tests omission from coverage target in tox.ini * add Goal into RESTful Web API (v1) documentation * Updated Strategy plugin doc * Doc on how to implement a custom Watcher planner * Add Watcher dashboard to the list of projects * Doc on how to implement a custom Watcher action * Fixed wrongly used assertEqual method * Improve DevStack documentation for beginners * Added support for live migration on non-shared storage * Updated Watcher doc to mention Tempest tests * RST directive to discover and generate drivers doc * Rename 'TRIGGERED' state as 'PENDING' * Fixed type in get\_audit\_template\_by\_name method * Updated from global requirements * Cleanup in tests/\_\_init\_\_.py * Update nova service state * Replace "Triggered" state by "Pending" state * Add start directory for oslo\_debug\_helper * Add missing requirements * Updated from global requirements * Re-enable related Tempest test * Useless return statement in 
validate\_sort\_dir * Pass parameter to the query in get\_last\_sample\_values * Remove unused function and argument * Added goal filter in Watcher API * Improve variable names in strategy implementations * Added unit tests on actions * Clean imports in code * Add Voluptuous to validate the action parameters * Remove KEYSTONE\_CATALOG\_BACKEND from DevStack plugin * Cleanup in test\_objects.py * Better cleanup for Tempest tests * Ceilometer client instantiation fixup * Update the default version of Neutron API * Sync with openstack/requirements master branch * Delete linked actions when deleting an action plan 0.23.2 ------ * Add IRC information into contributing page * Update docs for password auth configuration options * Remove references to SERVERS\_CONSOLIDATION * Create OpenStackClients convenience class * Added Tempest scenario for BASIC\_CONSOLIDATION * Use install instead of mkdir for DevStack dirs * Removed unused parameter in dt\_deserializer() * Remove unused parameter in Actions API controller * Define self.client in MessagingCore * Remove InvalidParameterValue exception * Tempest API tests on /actions * GET on an action\_plan provides first\_action\_uuid * Fixed ActionPlanNotFound typo in msg\_fmt 0.23.1 ------ * Fixed tempest test bug 0.23.0 ------ * Action plan state transition - payload validation * Add 'workers' section into configuration doc * API Tempest tests on goals * Fix HTML warnings on HTML doc * Action Plan state - Changed STARTING to TRIGGERED * Tempest scenario - execute a dummy strategy * Added doc8 * Add reference to Ceilometer developer guide * API Tempest tests on Action plans * Re-organize the Wacher documentation Home Page * Fix 'Module index' broken HTTP link * API Tempest tests on Audits * Refactored existing tempest API tests * Renamed Status to State * Update the user-guide to explain the main steps * Refactor Commands section * Use taskflow library for building and executing action plans * Removed unused parameters from api controllers * Validate audit template UUID on audit create * Add diagrams to the architecture doc page * Fix Warnings generated while building of HTML docu * Reduced the complexity of the execute() method * Missing super() in API collection controllers * Remove shadow BaseException class * Replace message with msg\_fmt for custom exceptions * Removed use of deprecated LOG.warn method * Add a dynamic loading of Actions handlers in the Watcher Applier * Update API documentation for action plan * Renamed diskInfo.py * Fix extraction of \_LI \_LW \_LE \_LC for translation * Clean up flake8 ignore list * Move terminology definition to class related * Keep py3.X compatibility for urllib * Use dict.items() dirrectly instead of six.iteritems * Test: make enforce\_type=True in CONF.set\_override and fix error * Remove incorrect spaces for libvirt\_opts value * Add a generic and extensible way to describe the flow of actions * Add a dynamic loading of the Watcher Planner implementation * Add a common generic dynamic loader for watcher * Add the possibility to store several parameters for an Action * Changed testr to os-testr * Strategy goals should be required in conf * Use assertTrue/False instead of assertEqual(T/F) * Implement DevStack plugin * Remove useless Meta-Action 0.22.0 ------ * outlet Temperature based migration strategy * Move Audit-template management in DefaultStrategyContext * Remove duplicated nova wrapper * Move glossary.rst to root folder of doc * Remove string concatenation in favor of string formatting * Remove useless 
event factory * Rename NovaWrapper to NovaClient * i18n - Make string translatable * Change default strategy to DummyStrategy * Add Creative Commons Attribution header to documentation * Code refactoring - StrategyContext and Auditendpoint * Remove \*.pyc files before running tox tests * Add missing parameter in prepare\_service for api * Fix generation of watcher config file * Rename command to audit * 'admin\_user' opt (and others) imported twice * Removed duplicated function prepare\_service() * Internationalization (i18n) - Enable French locale * Include terminology definition from docstring * Remove pragma no cover from code * Tidy up - Watcher Decision Engine package * Typo in ClusteStateNotDefined * Some tests are ignored * Tidy up - Rename Base * Refactored Watcher codebase to add py34 support * Added unit tests on nova wrapper * Removed H404, H405, H305 ignore in pep8 * Removed unnecessary code from basic\_consolidation * Remove unreachable code in basic\_consolidation.py * Rename Mapper to Mapping * Tidy up - Primitive * Remove references to removed watcher/openstack directory * Removed py33, pypy support * Remove alembic revision of watcher db * Add Apache license header to all rst documentation * Rename Command to Action * Update the glossary to lay down Watcher terminology * Rename command to action\_plan * Removed unused enum * Rename Meta-Action to Action * Add a checker for the documentation * Rename efficiency to efficacy * Fix Watcher Applier variables in CamelCase * Remove duplicate setup in Watcher API main() * Cleanup deprecated documentation * Provide detailed information on architecture 0.21.0 ------ * Update configuration section for rabbitmq * Created a glossary to lay down Watcher terminology * Update documentation regarding Ceilometer API V2 * Fixed missing attribute in the data model * Removed py26 support * Code refactoring - Watcher Applier package * Removed old (and unused) openstack/oslo libs * Fixed doc generation warning * add missing keystoneclient dependency * Added priority level to Nop action * Removed 'watcher\_messaging' to use oslo.messaging * Improve OpenStack clients API * Added 'dummy' entrypoint within watcher\_strategies * Fixed tense consistency (used past) in statuses * Added LaunchActionPlanCommand's init super call * Explained KEYSTONE\_SERVICE\_PROJECT\_NAME variable * Added missing super call in DefaultApplier's init * AMQP Channel have to be set espacially to 'watcher' * Updated the config sample file generation command * Code refactoring - Watcher Decision Engine package * Strategy plugins documentation * Update requirements from OS Global Requirements * Made Decision Engine extensible via stevedore * Integration of Ceilometer in Watcher * Update configuration file sample * Fix config loading when running the watcher-api * Fix tiny typo on configuration doc * Avoid dividing zero * Should use watcher/common/context.py for consistency * Update policy.py * Watcher notification listener is associated with an executor which integrates the listener with a specific I/O handling framework. 
blocking executor should be the default one * Update Rabbit MQ server configuration extract * Use a single command to create the 3 watcher endpoints * Use i18n directly * Use olso.log directly * refactoring documentation * Change stackforge to openstack, corrected some heading underlines * use https instead of http for doc link * update Watcher mission * consolidation of watcher * update config file sample * update documentation * fix dependencies version * Documention fixes * initial version * Added .gitreview ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/HACKING.rst0000664000175000017500000000056100000000000016455 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ ========================== watcher Style Commandments ========================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/LICENSE0000664000175000017500000002363700000000000015675 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6591353 python_watcher-14.0.0/PKG-INFO0000644000175000017500000000742600000000000015761 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: python-watcher Version: 14.0.0 Summary: OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Home-page: https://docs.openstack.org/watcher/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.9 License-File: LICENSE Requires-Dist: apscheduler>=3.5.1 Requires-Dist: eventlet>=0.27.0 Requires-Dist: jsonpatch>=1.21 Requires-Dist: keystoneauth1>=3.4.0 Requires-Dist: jsonschema>=3.2.0 Requires-Dist: keystonemiddleware>=4.21.0 Requires-Dist: lxml>=4.5.1 Requires-Dist: croniter>=0.3.20 Requires-Dist: os-resource-classes>=0.4.0 Requires-Dist: oslo.concurrency>=3.26.0 Requires-Dist: oslo.cache>=1.29.0 Requires-Dist: oslo.config>=6.8.0 Requires-Dist: oslo.context>=2.21.0 Requires-Dist: oslo.db>=4.44.0 Requires-Dist: oslo.i18n>=3.20.0 Requires-Dist: oslo.log>=3.37.0 Requires-Dist: oslo.messaging>=14.1.0 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.reports>=1.27.0 Requires-Dist: oslo.serialization>=2.25.0 Requires-Dist: oslo.service>=1.30.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=7.0.0 Requires-Dist: oslo.versionedobjects>=1.32.0 Requires-Dist: PasteDeploy>=1.5.2 Requires-Dist: pbr>=3.1.1 Requires-Dist: pecan>=1.3.2 Requires-Dist: 
PrettyTable>=0.7.2 Requires-Dist: gnocchiclient>=7.0.1 Requires-Dist: python-cinderclient>=3.5.0 Requires-Dist: python-glanceclient>=2.9.1 Requires-Dist: python-keystoneclient>=3.15.0 Requires-Dist: python-monascaclient>=1.12.0 Requires-Dist: python-neutronclient>=6.7.0 Requires-Dist: python-novaclient>=14.1.0 Requires-Dist: python-observabilityclient>=0.3.0 Requires-Dist: python-openstackclient>=3.14.0 Requires-Dist: python-ironicclient>=2.5.0 Requires-Dist: SQLAlchemy>=1.2.5 Requires-Dist: stevedore>=1.28.0 Requires-Dist: taskflow>=3.8.0 Requires-Dist: WebOb>=1.8.5 Requires-Dist: WSME>=0.9.2 Requires-Dist: networkx>=2.4 Requires-Dist: microversion_parse>=0.2.1 Requires-Dist: futurist>=1.8.0 ======= Watcher ======= .. image:: https://governance.openstack.org/tc/badges/watcher.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on .. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Watcher provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! * Free software: Apache license * Wiki: https://wiki.openstack.org/wiki/Watcher * Source: https://opendev.org/openstack/watcher * Bugs: https://bugs.launchpad.net/watcher * Documentation: https://docs.openstack.org/watcher/latest/ * Release notes: https://docs.openstack.org/releasenotes/watcher/ * Design specifications: https://specs.openstack.org/openstack/watcher-specs/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/README.rst0000664000175000017500000000221300000000000016342 0ustar00zuulzuul00000000000000======= Watcher ======= .. image:: https://governance.openstack.org/tc/badges/watcher.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on .. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Watcher provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! 
* Free software: Apache license * Wiki: https://wiki.openstack.org/wiki/Watcher * Source: https://opendev.org/openstack/watcher * Bugs: https://bugs.launchpad.net/watcher * Documentation: https://docs.openstack.org/watcher/latest/ * Release notes: https://docs.openstack.org/releasenotes/watcher/ * Design specifications: https://specs.openstack.org/openstack/watcher-specs/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.5991352 python_watcher-14.0.0/api-ref/0000775000175000017500000000000000000000000016200 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6071353 python_watcher-14.0.0/api-ref/source/0000775000175000017500000000000000000000000017500 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/conf.py0000664000175000017500000000500600000000000021000 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # watcher documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. extensions = [ 'openstackdocstheme', 'os_api_ref', ] # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Infrastructure Optimization API Reference' copyright = u'2010-present, OpenStack Foundation' # openstackdocstheme options openstackdocs_repo_name = 'openstack/watcher' openstackdocs_auto_name = False openstackdocs_bug_project = 'watcher' openstackdocs_bug_tag = '' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "sidebar_mode": "toc", } # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]).
latex_documents = [ ('index', 'Watcher.tex', u'Infrastructure Optimization API Reference', u'OpenStack Foundation', 'manual'), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/index.rst0000664000175000017500000000102600000000000021340 0ustar00zuulzuul00000000000000:tocdepth: 2 =========== Watcher API =========== .. rest_expand_all:: .. include:: watcher-api-versions.inc .. include:: watcher-api-v1-audittemplates.inc .. include:: watcher-api-v1-audits.inc .. include:: watcher-api-v1-actionplans.inc .. include:: watcher-api-v1-actions.inc .. include:: watcher-api-v1-goals.inc .. include:: watcher-api-v1-strategies.inc .. include:: watcher-api-v1-services.inc .. include:: watcher-api-v1-scoring_engines.inc .. include:: watcher-api-v1-datamodel.inc .. include:: watcher-api-v1-webhooks.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/parameters.yaml0000664000175000017500000003450200000000000022533 0ustar00zuulzuul00000000000000# variables in header header_version: description: | Specific API microversion used to generate this response. in: header required: true type: string openstack-api-max-version: description: | Maximum API microversion supported by this endpoint, e.g. "1.1" in: header required: true type: string openstack-api-min-version: description: | Minimum API microversion supported by this endpoint, e.g. "1.0" in: header required: true type: string openstack-api-version: description: > A request SHOULD include this header to indicate to the Watcher API service what version the client supports. The server will transform the response object into compliance with the requested version, if it is supported, or return a 406 Not Acceptable error. If this header is not supplied, the server will respond with the server's minimum supported version. in: header required: true type: string openstack-request-id: description: > A unique ID for tracking the request. The request ID associated with the request appears in the log lines for that request. By default, the middleware configuration ensures that the request ID appears in the log files. in: header required: false type: string # Path action_ident: description: | The UUID of the Action. in: path required: true type: string actionplan_ident: description: | The UUID of the Action Plan. in: path required: true type: string audit_ident: description: | The UUID or name of the Audit. in: path required: true type: string audittemplate_ident: description: | The UUID or name of the Audit Template. in: path required: true type: string goal_ident: description: | The UUID or name of the Goal. in: path required: true type: string scoring_engine_ident: description: | The UUID or name of the Scoring Engine. in: path required: true type: string service_ident: description: | The ID or name of the Service. in: path required: true type: string strategy_ident: description: | The UUID or name of the Strategy. in: path required: true type: string # Query body limit: description: | Requests a page size of items. Returns a number of items up to a ``limit`` value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer marker: description: | The ID of the last-seen item.
Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string r_action_plan: description: | UUID of the action plan used for filtering. in: query required: false type: string r_audit: description: | Optional UUID of an audit, to get only actions for that audit. in: query required: false type: string r_goal: description: | The UUID or name of the Goal. in: query required: false type: string r_strategy: description: | The UUID or name of the Strategy. in: query required: false type: string r_type: description: | Type of data model the user wants to list. Default type is compute. Supported values: compute. Future supported values: storage, baremetal. in: query required: false type: string sort_dir: description: | Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. in: query required: false type: string sort_key: description: | Sorts the response by this attribute value. Default is ``id``. in: query required: false type: string # variables in the API response body # Action action_action_plan_uuid: description: | The action plan this action belongs to. in: body required: true type: string action_description: description: | Action description. in: body required: true type: string action_input_parameters: description: | Input parameters which are used by the appropriate action type. For example, the ``migration`` action takes into account such parameters as ``migration_type``, ``destination_node``, ``resource_id`` and ``source_node``. To see a list of supported action types and their input parameters, visit `Action plugins page `_. in: body required: true type: JSON action_parents: description: | UUIDs of parent actions. in: body required: true type: array action_state: description: | State of Action. in: body required: true type: string action_type: description: | Action type based on specific API action. Actions in Watcher are pluggable; to see a list of supported action types, visit `Action plugins page `_. in: body required: true type: string # Action Plan actionplan_audit_uuid: description: | The UUID of the audit this action plan belongs to. in: body required: false type: string actionplan_efficacy_indicators: description: | The list of efficacy indicators associated with this action plan. in: body required: false type: array actionplan_global_efficacy: description: | The global efficacy of this action plan. in: body required: false type: array actionplan_hostname: description: | Hostname the action plan is running on. in: body required: false type: string actionplan_state: description: | State of this action plan. To get more information about states and the action plan's lifecycle, visit `Action Plan State Machine page `_. in: body required: false type: string # Audit audit_autotrigger: description: | Auto execute the action plan once the audit has succeeded. in: body required: false type: boolean audit_endtime_req: description: | The local time after which the audit can't be executed. It will be converted to UTC time by Watcher. in: body required: false type: string min_version: 1.1 audit_endtime_resp: description: | The UTC time after which the audit can't be executed. in: body required: false type: string min_version: 1.1 audit_force: description: | Launch the audit even if an action plan is ongoing.
in: body required: false type: boolean min_version: 1.2 audit_goal: description: | The UUID or name of the Goal. in: body required: false type: string audit_hostname: description: | Hostname the audit is running on. in: body required: false type: string audit_interval: description: | Time interval between audit executions. Can be set either in seconds or in cron syntax. Should be defined only for CONTINUOUS audits. in: body required: false type: string audit_name: description: | Name of this audit. in: body required: false type: string audit_next_run_time: description: | The next time the audit will launch. Defined only for CONTINUOUS audits. in: body required: false type: string audit_parameters: description: | The strategy parameters for this audit. in: body required: false type: JSON audit_starttime_req: description: | The local time after which the audit can be executed in accordance with the interval. It will be converted to UTC time by Watcher. in: body required: false type: string min_version: 1.1 audit_starttime_resp: description: | The UTC time after which the audit can be executed in accordance with the interval. in: body required: false type: string min_version: 1.1 audit_state: description: | State of this audit. To get more information about states and the audit's lifecycle, visit `Audit State Machine page `_. in: body required: true type: string audit_strategy: description: | The UUID or name of the Strategy. in: body required: false type: string audit_type: description: | Type of this audit. Can only be either ONESHOT or CONTINUOUS. in: body required: true type: string # Audit Template audittemplate_description: description: | Short description of the Audit Template. in: body required: false type: string audittemplate_goal: description: | The UUID or name of the Goal. in: body required: true type: string audittemplate_name: description: | The name of the Audit Template. in: body required: true type: string audittemplate_scope: description: | Audit Scope. in: body required: false type: JSON audittemplate_strategy: description: | The UUID or name of the Strategy. in: body required: false type: string audittemplate_uuid: description: | The UUID of the Audit Template. in: body required: true type: string created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_ in: body required: true type: string deleted_at: description: | The date and time when the resource was deleted. The date and time stamp format is `ISO 8601 `_ in: body required: true type: string # Goal goal_display_name: description: | Localized name of the goal. in: body required: true type: string goal_efficacy_specification: description: | Efficacy specifications as a result of the strategy's execution. in: body required: true type: array goal_name: description: | Name of the goal. in: body required: true type: string goal_uuid: description: | Unique UUID for this goal. in: body required: true type: string links: description: | A list of relative links. Includes the self and bookmark links. in: body required: true type: array # Data Model Node node_disk: description: | The Disk of the node (in GiB). in: body required: true type: integer node_disk_ratio: description: | The Disk Ratio of the node. in: body required: true type: float node_hostname: description: | The Host Name of the node. in: body required: true type: string node_memory: description: | The Memory of the node (in MiB). in: body required: true type: integer node_memory_ratio: description: | The Memory Ratio of the node.
in: body required: true type: float node_state: description: | The State of the node. The value is up or down. in: body required: true type: string node_uuid: description: | The Unique UUID of the node. in: body required: true type: string node_vcpu_ratio: description: | The vCPU ratio of the node. in: body required: true type: float node_vcpus: description: | The number of vCPUs of the node. in: body required: true type: integer # Scoring Engine scoring_engine_description: description: | A human-readable description of the Scoring Engine. in: body required: true type: string scoring_engine_metainfo: description: | Metadata associated with the scoring engine. in: body required: true type: string scoring_engine_name: description: | The name of the scoring engine. in: body required: true type: string # Data Model Server server_disk: description: | The Disk of the server. in: body required: true type: integer server_memory: description: | The Memory of the server. in: body required: true type: integer server_name: description: | The Name of the server. in: body required: true type: string server_state: description: | The State of the server. in: body required: true type: string server_uuid: description: | The Unique UUID of the server. in: body required: true type: string server_vcpus: description: | The number of vCPUs of the server. in: body required: true type: integer # Service service_host: description: | The Name of the host where the service is placed. in: body required: true type: string service_id: description: | The ID of the service. in: body required: true type: integer service_last_seen_up: description: | The Time when the Watcher service sent its latest heartbeat. in: body required: true type: string service_name: description: | The Name of the service, like ``watcher-applier``. in: body required: true type: string service_status: description: | The State of the service. It can be either in the ACTIVE or FAILED state. in: body required: true type: string # Strategy strategy_check_comment: description: | Requirement comment. in: body required: true type: string strategy_check_mandatory: description: | Whether this requirement is mandatory or not. in: body required: true type: boolean strategy_check_state: description: | State of requirement for Strategy. in: body required: true type: string or JSON strategy_check_type: description: | Type of requirement for Strategy. in: body required: true type: string strategy_display_name: description: | Localized name of the strategy. in: body required: true type: string strategy_name: description: | Name of the strategy. in: body required: true type: string strategy_parameters_spec: description: | Parameter specifications for this strategy. in: body required: true type: JSON strategy_uuid: description: | Unique UUID for this strategy. in: body required: true type: string updated_at: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 `_ in: body required: true type: string uuid: description: | The UUID for the resource. in: body required: true type: string # Version version: description: | Versioning of this API response, e.g. "1.1". in: body required: true type: string version_description: description: | Descriptive text about the Watcher service. in: body required: true type: string version_id: description: | Major API version, e.g. "v1" in: body required: true type: string versions: description: | Array of information about currently supported versions.
in: body required: true type: array ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/api-ref/source/samples/0000775000175000017500000000000000000000000021144 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actionplan-cancel-request-cancelling.json0000664000175000017500000000014000000000000031170 0ustar00zuulzuul00000000000000[ { "op": "replace", "value": "CANCELLING", "path": "/state" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actionplan-cancel-request-pending.json0000664000175000017500000000013700000000000030523 0ustar00zuulzuul00000000000000[ { "op": "replace", "value": "CANCELLED", "path": "/state" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actionplan-list-detailed-response.json0000664000175000017500000000173600000000000030554 0ustar00zuulzuul00000000000000{ "action_plans": [ { "state": "ONGOING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:52.640067+00:00", "strategy_name": "dummy_with_resize", "deleted_at": null, "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a", "created_at": "2018-04-10T11:59:52.640067+00:00", "hostname": "controller" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actionplan-list-response.json0000664000175000017500000000153100000000000026774 0ustar00zuulzuul00000000000000{ "action_plans": [ { "state": "ONGOING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:52.640067+00:00", "strategy_name": "dummy_with_resize", "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actionplan-show-response.json0000664000175000017500000000126700000000000027007 0ustar00zuulzuul00000000000000{ "state": "ONGOING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:52.640067+00:00", "strategy_name": "dummy_with_resize", "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a", "hostname": "controller" 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actionplan-start-response.json0000664000175000017500000000137500000000000027164 0ustar00zuulzuul00000000000000{ "state": "PENDING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:41.602430+00:00", "strategy_name": "dummy_with_resize", "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a", "created_at": "2018-04-10T11:59:12.592729+00:00", "deleted_at": null, "hostname": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actions-list-detailed-response.json0000664000175000017500000000174700000000000030066 0ustar00zuulzuul00000000000000{ "actions": [ { "state": "PENDING", "description": "Wait for a given interval in seconds.", "parents": [ "8119d16e-b419-4729-b015-fc04c4e45783" ], "links": [ { "rel": "self", "href": "http://controller:9322/v1/actions/7182a988-e6c4-4152-a0d6-067119475c83" }, { "rel": "bookmark", "href": "http://controller:9322/actions/7182a988-e6c4-4152-a0d6-067119475c83" } ], "action_plan_uuid": "c6bba9ed-a7eb-4370-9993-d873e5e22cba", "uuid": "7182a988-e6c4-4152-a0d6-067119475c83", "deleted_at": null, "updated_at": null, "input_parameters": { "duration": 3.2 }, "action_type": "sleep", "created_at": "2018-03-26T11:56:08.235226+00:00" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actions-list-response.json0000664000175000017500000000132400000000000026304 0ustar00zuulzuul00000000000000{ "actions": [ { "state": "PENDING", "parents": [ "8119d16e-b419-4729-b015-fc04c4e45783" ], "links": [ { "rel": "self", "href": "http://controller:9322/v1/actions/7182a988-e6c4-4152-a0d6-067119475c83" }, { "rel": "bookmark", "href": "http://controller:9322/actions/7182a988-e6c4-4152-a0d6-067119475c83" } ], "action_plan_uuid": "c6bba9ed-a7eb-4370-9993-d873e5e22cba", "uuid": "7182a988-e6c4-4152-a0d6-067119475c83", "action_type": "sleep" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/actions-show-response.json0000664000175000017500000000141700000000000026314 0ustar00zuulzuul00000000000000{ "state": "SUCCEEDED", "description": "Logging a NOP message", "parents": [ "b4529294-1de6-4302-b57a-9b5d5dc363c6" ], "links": [ { "rel": "self", "href": "http://controller:9322/v1/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a" }, { "rel": "bookmark", "href": "http://controller:9322/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a" } ], "action_plan_uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "uuid": "54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a", "deleted_at": null, "updated_at": "2018-04-10T11:59:44.026973+00:00", "input_parameters": { "message": "Welcome" }, "action_type": "nop", "created_at": "2018-04-10T11:59:12.725147+00:00" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/api-ref/source/samples/api-root-response.json0000664000175000017500000000120300000000000025421 0ustar00zuulzuul00000000000000{ "default_version": { "id": "v1", "links": [ { "href": "http://controller:9322/v1/", "rel": "self" } ], "min_version": "1.0", "status": "CURRENT", "max_version": "1.1" }, "description": "Watcher is an OpenStack project which aims to improve physical resources usage through better VM placement.", "name": "OpenStack Watcher API", "versions": [ { "id": "v1", "links": [ { "href": "http://controller:9322/v1/", "rel": "self" } ], "min_version": "1.0", "status": "CURRENT", "max_version": "1.1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/api-v1-root-response.json0000664000175000017500000000306700000000000025757 0ustar00zuulzuul00000000000000{ "scoring_engines": [ { "href": "http://controller:9322/v1/scoring_engines/", "rel": "self" }, { "href": "http://controller:9322/scoring_engines/", "rel": "bookmark" } ], "media_types": [ { "base": "application/json", "type": "application/vnd.openstack.watcher.v1+json" } ], "links": [ { "href": "http://controller:9322/v1/", "rel": "self" }, { "href": "http://docs.openstack.org/developer/watcher/dev/api-spec-v1.html", "type": "text/html", "rel": "describedby" } ], "actions": [ { "href": "http://controller:9322/v1/actions/", "rel": "self" }, { "href": "http://controller:9322/actions/", "rel": "bookmark" } ], "audit_templates": [ { "href": "http://controller:9322/v1/audit_templates/", "rel": "self" }, { "href": "http://controller:9322/audit_templates/", "rel": "bookmark" } ], "action_plans": [ { "href": "http://controller:9322/v1/action_plans/", "rel": "self" }, { "href": "http://controller:9322/action_plans/", "rel": "bookmark" } ], "services": [ { "href": "http://controller:9322/v1/services/", "rel": "self" }, { "href": "http://controller:9322/services/", "rel": "bookmark" } ], "audits": [ { "href": "http://controller:9322/v1/audits/", "rel": "self" }, { "href": "http://controller:9322/audits/", "rel": "bookmark" } ], "id": "v1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-cancel-request.json0000664000175000017500000000013700000000000026057 0ustar00zuulzuul00000000000000[ { "op": "replace", "value": "CANCELLED", "path": "/state" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-cancel-response.json0000664000175000017500000000307400000000000026230 0ustar00zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "audit1", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "CANCELLED", "audit_type": "CONTINUOUS", "links": [ { "rel": 
"self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T11:56:00", "updated_at": "2018-04-06T11:54:01.266447+00:00", "hostname": "controller", "start_time": null, "end_time": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-create-request-continuous.json0000664000175000017500000000055600000000000030306 0ustar00zuulzuul00000000000000{ "auto_trigger": false, "force": false, "audit_template_uuid": "76fddfee-a9c4-40b0-8da0-c19ad6904f09", "name": "test_audit", "parameters": { "metrics": [ "cpu_util" ] }, "audit_type": "CONTINUOUS", "interval": "*/2 * * * *", "start_time":"2018-04-02 20:30:00", "end_time": "2018-04-04 20:30:00" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-create-request-oneshot.json0000664000175000017500000000022100000000000027544 0ustar00zuulzuul00000000000000{ "audit_type": "ONESHOT", "auto_trigger": false, "force": true, "audit_template_uuid": "5e70a156-ced7-4012-b1c6-88fcb02ee0c1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-create-response.json0000664000175000017500000000300700000000000026242 0ustar00zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "test_audit", "parameters": { "host_choice": "retry", "granularity": 300, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 }, "periods": { "node": 600, "instance": 720 }, "retry_count": 1, "metrics": [ "cpu_util" ], "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "PENDING", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": null, "updated_at": null, "hostname": null, "start_time": null, "end_time": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-list-detailed-response.json0000664000175000017500000000402000000000000027517 0ustar00zuulzuul00000000000000{ "audits": [ { "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "test_audit", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": 
{ "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "ONGOING", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T09:46:00", "updated_at": "2018-04-06T09:44:01.604146+00:00", "hostname": "controller", "start_time": null, "end_time": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-list-response.json0000664000175000017500000000166000000000000025755 0ustar00zuulzuul00000000000000{ "audits": [ { "interval": null, "strategy_uuid": "e311727b-b9b3-43ef-a5f7-8bd7ea80df25", "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "dummy-2018-03-26T11:56:07.950400", "auto_trigger": false, "uuid": "ccc69a5f-114e-46f4-b15e-a77eaa337b01", "goal_name": "dummy", "scope": [], "state": "SUCCEEDED", "audit_type": "ONESHOT", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/ccc69a5f-114e-46f4-b15e-a77eaa337b01" }, { "rel": "bookmark", "href": "http://controller:9322/audits/ccc69a5f-114e-46f4-b15e-a77eaa337b01" } ], "strategy_name": "dummy", "next_run_time": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-show-response.json0000664000175000017500000000307600000000000025765 0ustar00zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "test_audit", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "ONGOING", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T11:56:00", "updated_at": "2018-04-06T11:54:01.266447+00:00", "hostname": "controller", "start_time": null, "end_time": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audit-update-request.json0000664000175000017500000000027000000000000026112 0ustar00zuulzuul00000000000000[ { "value": "CANCELLED", "path": "/state", "op": "replace" }, { "value": "audit1", "path": "/name", "op": "replace" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/api-ref/source/samples/audit-update-response.json0000664000175000017500000000307400000000000026265 0ustar00zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "audit1", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "CANCELLED", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T11:56:00", "updated_at": "2018-04-06T11:54:01.266447+00:00", "hostname": "controller", "start_time": null, "end_time": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-create-request-full.json0000664000175000017500000000020400000000000030564 0ustar00zuulzuul00000000000000{ "name": "at2", "goal": "dummy", "strategy": "dummy", "description": "the second audit template", "scope": [] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-create-request-minimal.json0000664000175000017500000000005200000000000031251 0ustar00zuulzuul00000000000000{ "name": "at2", "goal": "dummy" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-create-response.json0000664000175000017500000000124200000000000027775 0ustar00zuulzuul00000000000000{ "description": null, "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at3", "uuid": "b4041d8c-85d7-4224-851d-649fe48b7196", "goal_name": "dummy", "scope": [], "created_at": "2018-04-04T08:38:33.110432+00:00", "deleted_at": null, "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" } ], "strategy_name": null, "updated_at": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-list-detailed-response.json0000664000175000017500000000136200000000000031261 0ustar00zuulzuul00000000000000{ "audit_templates":[ { "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at3", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" } ], "strategy_name": null, "uuid": "b4041d8c-85d7-4224-851d-649fe48b7196", 
"goal_name": "dummy", "scope": [], "description": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-list-response.json0000664000175000017500000000132100000000000027503 0ustar00zuulzuul00000000000000{ "audit_templates":[ { "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at3", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" } ], "strategy_name": null, "uuid": "b4041d8c-85d7-4224-851d-649fe48b7196", "goal_name": "dummy", "scope": [] } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-show-response.json0000664000175000017500000000130400000000000027511 0ustar00zuulzuul00000000000000{ "description": "test 1", "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at1", "uuid": "0d100c27-14af-4962-86fb-f6079287c9c6", "goal_name": "dummy", "scope": [], "created_at": "2018-04-04T07:48:36.175472+00:00", "deleted_at": null, "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" } ], "strategy_name": null, "updated_at": "2018-04-05T07:57:55.803650+00:00" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-update-request.json0000664000175000017500000000013500000000000027646 0ustar00zuulzuul00000000000000[ { "op": "replace", "value": "PENDING", "path": "/state" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/audittemplate-update-response.json0000664000175000017500000000130500000000000030014 0ustar00zuulzuul00000000000000{ "description": "test 1", "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at11", "uuid": "0d100c27-14af-4962-86fb-f6079287c9c6", "goal_name": "dummy", "scope": [], "created_at": "2018-04-04T07:48:36.175472+00:00", "deleted_at": null, "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" } ], "strategy_name": null, "updated_at": "2018-04-05T07:57:42.139127+00:00" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/datamodel-list-response.json0000664000175000017500000000242000000000000026574 0ustar00zuulzuul00000000000000{ "context": [ { "server_uuid": "1bf91464-9b41-428d-a11e-af691e5563bb", "server_name": "chenke-test1", "server_vcpus": "1", "server_memory": "512", "server_disk": "1", "server_state": "active", "node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112", "node_hostname": "localhost.localdomain", "node_vcpus": "4", "node_vcpu_ratio": "16.0", "node_memory": "16383", "node_memory_ratio": "1.5", "node_disk": "37", "node_disk_ratio": "1.0", "node_state": "up" }, { "server_uuid": 
"e2cb5f6f-fa1d-4ba2-be1e-0bf02fa86ba4", "server_name": "chenke-test2", "server_vcpus": "1", "server_memory": "512", "server_disk": "1", "server_state": "active", "node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112", "node_hostname": "localhost.localdomain", "node_vcpus": "4", "node_vcpu_ratio": "16.0", "node_memory": "16383", "node_memory_ratio": "1.5", "node_disk": "37", "node_disk_ratio": "1.0", "node_state": "up" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/goal-list-response.json0000664000175000017500000000416300000000000025572 0ustar00zuulzuul00000000000000{ "goals": [ { "efficacy_specification": [], "uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "links": [ { "rel": "self", "href": "http://controller:9322/v1/goals/e1a5a45b-f251-47cf-9c5f-fa1e66e1286a" }, { "rel": "bookmark", "href": "http://controller:9322/goals/e1a5a45b-f251-47cf-9c5f-fa1e66e1286a" } ], "name": "workload_balancing", "display_name": "Workload Balancing" }, { "efficacy_specification": [ { "description": "The total number of enabled compute nodes.", "schema": "Range(min=0, max=None, min_included=True, max_included=True, msg=None)", "name": "compute_nodes_count", "unit": null }, { "description": "The number of compute nodes to be released.", "schema": "Range(min=0, max=None, min_included=True, max_included=True, msg=None)", "name": "released_compute_nodes_count", "unit": null }, { "description": "The number of VM migrations to be performed.", "schema": "Range(min=0, max=None, min_included=True, max_included=True, msg=None)", "name": "instance_migrations_count", "unit": null } ], "uuid": "cb9afa5e-aec7-4a8c-9261-c15c33f2262b", "links": [ { "rel": "self", "href": "http://controller:9322/v1/goals/cb9afa5e-aec7-4a8c-9261-c15c33f2262b" }, { "rel": "bookmark", "href": "http://controller:9322/goals/cb9afa5e-aec7-4a8c-9261-c15c33f2262b" } ], "name": "server_consolidation", "display_name": "Server Consolidation" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/goal-show-response.json0000664000175000017500000000105400000000000025573 0ustar00zuulzuul00000000000000{ "efficacy_specification": [], "name": "saving_energy", "links": [ { "rel": "self", "href": "http://controller:9322/v1/goals/6f52889a-9dd4-4dbb-8e70-39b56c4836cc" }, { "rel": "bookmark", "href": "http://controller:9322/goals/6f52889a-9dd4-4dbb-8e70-39b56c4836cc" } ], "uuid": "6f52889a-9dd4-4dbb-8e70-39b56c4836cc", "updated_at": null, "display_name": "Saving Energy", "created_at": "2018-03-26T11:55:24.365584+00:00", "deleted_at": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/scoring_engine-list-detailed-response.json0000664000175000017500000000121600000000000031406 0ustar00zuulzuul00000000000000{ "scoring_engines": [ { "description": "Dummy Scorer calculating the average value", "uuid": "5a44f007-55b1-423c-809f-6a274a9bd93b", "links": [ { "rel": "self", "href": "http://controller:9322/v1/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" }, { "rel": "bookmark", "href": "http://controller:9322/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" } ], "name": "dummy_avg_scorer", "metainfo": "" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/api-ref/source/samples/scoring_engine-list-response.json0000664000175000017500000000116200000000000027635 0ustar00zuulzuul00000000000000{ "scoring_engines": [ { "description": "Dummy Scorer calculating the average value", "uuid": "5a44f007-55b1-423c-809f-6a274a9bd93b", "links": [ { "rel": "self", "href": "http://controller:9322/v1/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" }, { "rel": "bookmark", "href": "http://controller:9322/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" } ], "name": "dummy_avg_scorer" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/scoring_engine-show-response.json0000664000175000017500000000075300000000000027647 0ustar00zuulzuul00000000000000{ "description": "Dummy Scorer calculating the maximum value", "uuid": "1ac42282-4e77-473e-898b-62ea007f1deb", "links": [ { "rel": "self", "href": "http://controller:9322/v1/scoring_engines/1ac42282-4e77-473e-898b-62ea007f1deb" }, { "rel": "bookmark", "href": "http://controller:9322/scoring_engines/1ac42282-4e77-473e-898b-62ea007f1deb" } ], "name": "dummy_max_scorer", "metainfo": "" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/service-list-detailed-response.json0000664000175000017500000000127600000000000030063 0ustar00zuulzuul00000000000000{ "services": [ { "status": "ACTIVE", "name": "watcher-applier", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/1" }, { "rel": "bookmark", "href": "http://controller:9322/services/1" } ], "id": 1, "deleted_at": null, "updated_at": "2018-04-26T08:52:37.652895+00:00", "last_seen_up": "2018-04-26T08:52:37.648572", "created_at": "2018-03-26T11:55:24.075093+00:00" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/service-list-response.json0000664000175000017500000000167600000000000026316 0ustar00zuulzuul00000000000000{ "services": [ { "id": 1, "status": "ACTIVE", "name": "watcher-applier", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/1" }, { "rel": "bookmark", "href": "http://controller:9322/services/1" } ] }, { "id": 2, "status": "ACTIVE", "name": "watcher-decision-engine", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/2" }, { "rel": "bookmark", "href": "http://controller:9322/services/2" } ] } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/service-show-response.json0000664000175000017500000000100200000000000026302 0ustar00zuulzuul00000000000000{ "status": "ACTIVE", "name": "watcher-applier", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/1" }, { "rel": "bookmark", "href": "http://controller:9322/services/1" } ], "id": 1, "deleted_at": null, "updated_at": "2018-04-26T09:45:37.653061+00:00", "last_seen_up": "2018-04-26T09:45:37.649314", "created_at": "2018-03-26T11:55:24.075093+00:00" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/strategy-list-detailed-response.json0000664000175000017500000000245000000000000030260 
0ustar00zuulzuul00000000000000{ "strategies": [ { "goal_uuid": "cb9afa5e-aec7-4a8c-9261-c15c33f2262b", "name": "vm_workload_consolidation", "links": [ { "rel": "self", "href": "http://controller:9322/v1/strategies/6382b2d7-259e-487d-88db-78c852ffea54" }, { "rel": "bookmark", "href": "http://controller:9322/strategies/6382b2d7-259e-487d-88db-78c852ffea54" } ], "parameters_spec": { "properties": { "granularity": { "default": 300, "type": "number", "description": "The time between two measures in an aggregated timeseries of a metric." }, "period": { "default": 3600, "type": "number", "description": "The time interval in seconds for getting statistic aggregation" } } }, "uuid": "6382b2d7-259e-487d-88db-78c852ffea54", "goal_name": "server_consolidation", "display_name": "VM Workload Consolidation Strategy" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/strategy-list-response.json0000664000175000017500000000124000000000000026503 0ustar00zuulzuul00000000000000{ "strategies": [ { "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "dummy", "links": [ { "rel": "self", "href": "http://controller:9322/v1/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" }, { "rel": "bookmark", "href": "http://controller:9322/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" } ], "uuid": "e311727b-b9b3-43ef-a5f7-8bd7ea80df25", "goal_name": "dummy", "display_name": "Dummy strategy" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/strategy-show-response.json0000664000175000017500000000170400000000000026515 0ustar00zuulzuul00000000000000{ "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "dummy", "links": [ { "rel": "self", "href": "http://controller:9322/v1/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" }, { "rel": "bookmark", "href": "http://controller:9322/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" } ], "parameters_spec": { "properties": { "para2": { "default": "hello", "type": "string", "description": "string parameter example" }, "para1": { "maximum": 10.2, "type": "number", "minimum": 1.0, "description": "number parameter example", "default": 3.2 } } }, "uuid": "e311727b-b9b3-43ef-a5f7-8bd7ea80df25", "goal_name": "dummy", "display_name": "Dummy strategy" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/samples/strategy-state-response.json0000664000175000017500000000201400000000000026650 0ustar00zuulzuul00000000000000[ { "state": "gnocchi: available", "comment": "", "mandatory": true, "type": "Datasource" }, { "state": [ { "compute.node.cpu.percent": "available" }, { "cpu_util": "available" }, { "memory.resident": "available" }, { "hardware.memory.used": "available" } ], "comment": "", "mandatory": false, "type": "Metrics" }, { "state": [ { "compute_model": "available" }, { "storage_model": "not available" }, { "baremetal_model": "not available" } ], "comment": "", "mandatory": true, "type": "CDM" }, { "state": "workload_stabilization", "mandatory": "", "comment": "", "type": "Name" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-actionplans.inc0000664000175000017500000001337600000000000025246 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ============ Action Plans ============ An ``Action Plan`` specifies a flow of ``Actions`` that should be executed in order to satisfy a given ``Goal``. It also contains an estimated ``global efficacy`` alongside a set of ``efficacy indicators``. An ``Action Plan`` is generated by Watcher when an ``Audit`` is successful, which implies that the ``Strategy`` which was used has found a ``Solution`` to achieve the ``Goal`` of this ``Audit``. In the default implementation of Watcher, an action plan is composed of a graph of linked ``Actions``. Each action may have parent actions, which should be executed prior to the child action. Start Action Plan ================= .. rest_method:: POST /v1/action_plans/{actionplan_ident}/start Starts a created Action Plan resource. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-start-response.json :language: javascript List Action Plan ================ .. rest_method:: GET /v1/action_plans Returns a list of Action Plan resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - audit_uuid: r_audit - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-list-response.json :language: javascript List Action Plan Detailed ========================= .. rest_method:: GET /v1/action_plans/detail Returns a list of Action Plan resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - audit_uuid: r_audit - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - deleted_at: deleted_at - updated_at: updated_at - created_at: created_at - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-list-detailed-response.json :language: javascript Show Action Plan ================ .. rest_method:: GET /v1/action_plans/{actionplan_ident} Shows details for an Action Plan. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident Response -------- ..
rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-show-response.json :language: javascript Cancel Action Plan ================== .. rest_method:: PATCH /v1/action_plans/{actionplan_ident} Cancels a created Action Plan resource. .. note:: If the Action Plan is in the ONGOING state, the ``state`` attribute should be replaced with the ``CANCELLING`` value. Otherwise, ``CANCELLED`` should be used. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident **Example Action Plan ONGOING cancelling request:** .. literalinclude:: samples/actionplan-cancel-request-cancelling.json :language: javascript **Example Action Plan PENDING cancelling request:** .. literalinclude:: samples/actionplan-cancel-request-pending.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-start-response.json :language: javascript Delete Action Plan ================== .. rest_method:: DELETE /v1/action_plans/{actionplan_ident} Deletes an Action Plan. An Action Plan can be deleted only from the SUCCEEDED, RECOMMENDED, FAILED, SUPERSEDED and CANCELLED states. Normal response codes: 204 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-actions.inc0000664000175000017500000000714200000000000024365 0ustar00zuulzuul00000000000000.. -*- rst -*- ======= Actions ======= An ``Action`` is what enables Watcher to transform the current state of a ``Cluster`` after an ``Audit``. An ``Action`` is an atomic task which changes the current state of a target Managed resource of the OpenStack ``Cluster`` such as: - Live migration of an instance from one compute node to another compute node with Nova - Changing the power level of a compute node (ACPI level, ...) - Changing the current state of a compute node (enable or disable) with Nova In most cases, an ``Action`` triggers some concrete commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). An ``Action`` has a life-cycle and its current state may be one of the following: - **PENDING** : the ``Action`` has not been executed yet by the ``Watcher Applier``. - **ONGOING** : the ``Action`` is currently being processed by the ``Watcher Applier``. - **SUCCEEDED** : the ``Action`` has been executed successfully. - **FAILED** : an error occurred while trying to execute the ``Action``. - **DELETED** : the ``Action`` is still stored in the ``Watcher database`` but is not returned any more through the Watcher APIs.
- **CANCELLED** : the ``Action`` was in the **PENDING** or **ONGOING** state and was cancelled by the ``Administrator``. ``Actions`` are created by the ``Watcher Planner`` as a result of an Audit's execution. An ``Action`` can't be created, modified or deleted by the user. List Action =========== .. rest_method:: GET /v1/actions Returns a list of Action resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - action_plan_uuid: r_action_plan - audit_uuid: r_audit - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - action_type: action_type - state: action_state - action_plan_uuid: action_action_plan_uuid - parents: action_parents - links: links **Example JSON representation of an Action:** .. literalinclude:: samples/actions-list-response.json :language: javascript List Action Detailed ==================== .. rest_method:: GET /v1/actions/detail Returns a list of Action resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - action_plan_uuid: r_action_plan - audit_uuid: r_audit - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - action_type: action_type - state: action_state - action_plan_uuid: action_action_plan_uuid - parents: action_parents - description: action_description - input_parameters: action_input_parameters - links: links **Example JSON representation of an Action:** .. literalinclude:: samples/actions-list-detailed-response.json :language: javascript Show Action =========== .. rest_method:: GET /v1/actions/{action_ident} Shows details for an Action. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - action_ident: action_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - action_type: action_type - state: action_state - action_plan_uuid: action_action_plan_uuid - parents: action_parents - description: action_description - input_parameters: action_input_parameters - links: links **Example JSON representation of an Action:** .. literalinclude:: samples/actions-show-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-audits.inc0000664000175000017500000001746700000000000024225 0ustar00zuulzuul00000000000000.. -*- rst -*- ====== Audits ====== Watcher Audit resources support create, list, update and delete operations, which are implemented via the ``/v1/audits`` resource. In the Watcher system, an ``Audit`` is a request for optimizing a ``Cluster``. The optimization is done in order to satisfy one ``Goal`` on a given ``Cluster``. For each ``Audit``, the Watcher system generates an ``Action Plan``.
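The typical end-to-end flow chains the resources documented in this reference: create an ``Audit``, poll it until it reaches a terminal state, then fetch and start the resulting ``Action Plan``. The following is a minimal, illustrative sketch of that flow using the ``requests`` library; the endpoint URL, Keystone token and audit template UUID are placeholders, error handling is omitted, and the ``action_plans`` response key is assumed to follow the same wrapping convention as the other list samples in this document.

.. code-block:: python

    import time

    import requests

    # Placeholder endpoint and token; both are assumptions for this sketch.
    WATCHER = "http://controller:9322"
    HEADERS = {"X-Auth-Token": "<keystone-token>",
               "Content-Type": "application/json"}

    # 1. Create a ONESHOT audit from an existing audit template.
    audit = requests.post(
        f"{WATCHER}/v1/audits",
        headers=HEADERS,
        json={"audit_type": "ONESHOT",
              "audit_template_uuid": "5e70a156-ced7-4012-b1c6-88fcb02ee0c1"},
    ).json()

    # 2. Poll the audit until the decision engine reaches a terminal state.
    while audit["state"] not in ("SUCCEEDED", "FAILED", "CANCELLED"):
        time.sleep(5)
        audit = requests.get(
            f"{WATCHER}/v1/audits/{audit['uuid']}", headers=HEADERS).json()

    # 3. Fetch the action plan generated for this audit and start it.
    if audit["state"] == "SUCCEEDED":
        plans = requests.get(
            f"{WATCHER}/v1/action_plans",
            headers=HEADERS,
            params={"audit_uuid": audit["uuid"]},
        ).json()["action_plans"]
        requests.post(
            f"{WATCHER}/v1/action_plans/{plans[0]['uuid']}/start",
            headers=HEADERS)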
Create Audit ============ .. rest_method:: POST /v1/audits Creates a new Audit resource. The only mandatory attribute is ``audit_type``. An ``Audit`` can be created either from an existing ``Audit Template`` or on its own. In the first case, ``audit_template_uuid`` should also be supplied; if an ``Audit`` is created without an ``Audit Template``, ``goal`` should be provided. Normal response codes: 201 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_template_uuid: audittemplate_uuid - audit_type: audit_type - name: audit_name - goal: audit_goal - strategy: audit_strategy - parameters: audit_parameters - interval: audit_interval - auto_trigger: audit_autotrigger - start_time: audit_starttime_req - end_time: audit_endtime_req - force: audit_force **Example ONESHOT Audit creation request:** .. literalinclude:: samples/audit-create-request-oneshot.json :language: javascript **Example CONTINUOUS Audit creation request with a specified strategy:** .. literalinclude:: samples/audit-create-request-continuous.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-create-response.json :language: javascript List Audit ========== .. rest_method:: GET /v1/audits Returns a list of Audit resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-list-response.json :language: javascript List Audit Detailed =================== .. rest_method:: GET /v1/audits/detail Returns a list of Audit resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-list-detailed-response.json :language: javascript Show Audit ========== .. rest_method:: GET /v1/audits/{audit_ident} Shows details for an Audit. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident Response -------- ..
rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-show-response.json :language: javascript Cancel Audit ============ .. rest_method:: PATCH /v1/audits/{audit_ident} Cancels an ONGOING Audit resource. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident **Example Audit cancelling request:** .. literalinclude:: samples/audit-cancel-request.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-cancel-response.json :language: javascript Update Audit ============ .. rest_method:: PATCH /v1/audits/{audit_ident} Updates an Audit with the given information. .. note:: The ``audit_type`` attribute shouldn't be changed with the PATCH method. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident **Example PATCH document updating Audit:** .. literalinclude:: samples/audit-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-update-response.json :language: javascript Delete Audit ============ .. rest_method:: DELETE /v1/audits/{audit_ident} Deletes an Audit. An Audit can be deleted only from the FAILED, SUCCEEDED, CANCELLED and SUSPENDED states. Normal response codes: 204 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident
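To round out the audit lifecycle described above, cancel and update requests are expressed as JSON-PATCH documents against the same endpoint (see the Cancel Audit and Update Audit sections). A minimal illustrative sketch of cancelling an audit, reusing the placeholder endpoint and token from the earlier sketch; the audit UUID is taken from the sample responses in this document:

.. code-block:: python

    import requests

    # Placeholders, as in the earlier end-to-end sketch.
    WATCHER = "http://controller:9322"
    HEADERS = {"X-Auth-Token": "<keystone-token>",
               "Content-Type": "application/json"}
    AUDIT = "65a5da84-5819-4aea-8278-a28d2b489028"

    # The patch body mirrors the audit-cancel-request.json sample.
    patch = [{"op": "replace", "path": "/state", "value": "CANCELLED"}]
    resp = requests.patch(f"{WATCHER}/v1/audits/{AUDIT}",
                          headers=HEADERS, json=patch)
    print(resp.json()["state"])  # CANCELLED once the request is accepted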
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-audittemplates.inc0000664000175000017500000001264300000000000025754 0ustar00zuulzuul00000000000000.. -*- rst -*- =============== Audit Templates =============== Watcher Audit Template resources support create, list, update and delete operations, which are implemented via the ``/v1/audit_templates`` resource. An Audit may be launched several times with the same settings (Goal, thresholds, ...). Therefore it makes sense to save those settings in some sort of Audit preset object, which is known as an Audit Template. An Audit Template contains at least the Goal of the Audit. Create Audit Template ===================== .. rest_method:: POST /v1/audit_templates Creates a new Audit Template resource. It requires the ``name`` and ``goal`` attributes to be supplied in the request body. Normal response codes: 201 Error codes: 400,404,409 Request ------- .. rest_parameters:: parameters.yaml - name: audittemplate_name - goal: audittemplate_goal - strategy: audittemplate_strategy - description: audittemplate_description - scope: audittemplate_scope **Example Audit Template creation request without a specified strategy:** .. literalinclude:: samples/audittemplate-create-request-minimal.json :language: javascript **Example Audit Template creation request with a specified strategy:** .. literalinclude:: samples/audittemplate-create-request-full.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - description: audittemplate_description - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-create-response.json :language: javascript List Audit Template =================== .. rest_method:: GET /v1/audit_templates Returns a list of Audit Template resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-list-response.json :language: javascript List Audit Template Detailed ============================ .. rest_method:: GET /v1/audit_templates/detail Returns a list of Audit Template resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links - description: audittemplate_description **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-list-detailed-response.json :language: javascript Show Audit Template =================== .. rest_method:: GET /v1/audit_templates/{audittemplate_ident} Shows details for an Audit Template. Normal response codes: 200 Error codes: 404 Request ------- ..
rest_parameters:: parameters.yaml - audittemplate_ident: audittemplate_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links - description: audittemplate_description **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-show-response.json :language: javascript Update Audit Template ===================== .. rest_method:: PATCH /v1/audit_templates/{audittemplate_ident} Updates an Audit Template with the given information. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audittemplate_ident: audittemplate_ident **Example PATCH document updating Audit Template:** .. literalinclude:: samples/audittemplate-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links - description: audittemplate_description **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-update-response.json :language: javascript Delete Audit Template ===================== .. rest_method:: DELETE /v1/audit_templates/{audittemplate_ident} Deletes an Audit Template. Normal response codes: 204 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - audittemplate_ident: audittemplate_ident././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-datamodel.inc0000664000175000017500000000220700000000000024654 0ustar00zuulzuul00000000000000.. -*- rst -*- ========== Data Model ========== .. versionadded:: 1.3 ``Data Model`` is very important for Watcher to generate resource optimization solutions. Users can easily view the data model by the API. List Data Model =============== .. rest_method:: GET /v1/data_model Returns the information about Data Model. Normal response codes: 200 Error codes: 400,401,406 Request ------- .. rest_parameters:: parameters.yaml - audit: r_audit - type: r_type Response -------- .. rest_parameters:: parameters.yaml - server_uuid: server_uuid - server_name: server_name - server_vcpus: server_vcpus - server_memory: server_memory - server_disk: server_disk - server_state: server_state - node_uuid: node_uuid - node_hostname: node_hostname - node_vcpus: node_vcpus - node_vcpu_ratio: node_vcpu_ratio - node_memory: node_memory - node_memory_ratio: node_memory_ratio - node_disk: node_disk - node_disk_ratio: node_disk_ratio - node_state: node_state **Example JSON representation of a Data Model:** .. literalinclude:: samples/datamodel-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-goals.inc0000664000175000017500000000466300000000000024037 0ustar00zuulzuul00000000000000.. -*- rst -*- ===== Goals ===== A ``Goal`` is a human readable, observable and measurable end result having one objective to be achieved. 
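Goals are read-only resources; the set available in a deployment can be listed via the API (see List Goal below) before building audit templates or audits. A minimal illustrative sketch, reusing the placeholder endpoint and token from the audit examples; the ``goals`` response key matches the list sample referenced below:

.. code-block:: python

    import requests

    # Placeholder endpoint and token, as in the audit sketches.
    WATCHER = "http://controller:9322"
    HEADERS = {"X-Auth-Token": "<keystone-token>"}

    # Each goal carries a machine name and a human-readable display name.
    goals = requests.get(f"{WATCHER}/v1/goals", headers=HEADERS).json()["goals"]
    for goal in goals:
        print(goal["name"], "-", goal["display_name"])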
Here are some examples of ``Goals``: - minimize the energy consumption - minimize the number of compute nodes (consolidation) - balance the workload among compute nodes - minimize the license cost (some software has a licensing model which is based on the number of sockets or cores where the software is deployed) - find the most appropriate moment for a planned maintenance on a given group of hosts (which may be an entire availability zone): power supply replacement, cooling system replacement, hardware modification, ... List Goal ========= .. rest_method:: GET /v1/goals Returns a list of Goal resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - efficacy_specification: goal_efficacy_specification - name: goal_name - display_name: goal_display_name - links: links **Example JSON representation of a Goal:** .. literalinclude:: samples/goal-list-response.json :language: javascript List Goal Detailed ================== .. rest_method:: GET /v1/goals/detail Returns a list of Goal resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - efficacy_specification: goal_efficacy_specification - name: goal_name - display_name: goal_display_name - links: links **Example JSON representation of a Goal:** .. literalinclude:: samples/goal-list-response.json :language: javascript Show Goal ========= .. rest_method:: GET /v1/goals/{goal_ident} Shows details for a Goal. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - goal_ident: goal_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - efficacy_specification: goal_efficacy_specification - name: goal_name - display_name: goal_display_name - links: links **Example JSON representation of a Goal:** .. literalinclude:: samples/goal-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-scoring_engines.inc0000664000175000017500000000502500000000000026077 0ustar00zuulzuul00000000000000.. -*- rst -*- =============== Scoring Engines =============== A ``Scoring Engine`` is an executable that has a well-defined input, a well-defined output, and performs a purely mathematical task. That is, the calculation does not depend on the environment in which it is running - it would produce the same result anywhere. Because there might be multiple algorithms used to build a particular data model (and therefore a scoring engine), the usage of a scoring engine might vary. The ``metainfo`` field is meant to contain any information which might be needed by the user of a given scoring engine. List Scoring Engine =================== .. rest_method:: GET /v1/scoring_engines Returns a list of Scoring Engine resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: scoring_engine_name - description: scoring_engine_description - links: links **Example JSON representation of a Scoring Engine:** ..
literalinclude:: samples/scoring_engine-list-response.json :language: javascript List Scoring Engine Detailed ============================ .. rest_method:: GET /v1/scoring_engines/detail Returns a list of Scoring Engine resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: scoring_engine_name - description: scoring_engine_description - metainfo: scoring_engine_metainfo - links: links **Example JSON representation of a Scoring Engine:** .. literalinclude:: samples/scoring_engine-list-detailed-response.json :language: javascript Show Scoring Engine =================== .. rest_method:: GET /v1/scoring_engines/{scoring_engine_ident} Shows details for a Scoring Engine resource. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - scoring_engine_ident: scoring_engine_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: scoring_engine_name - description: scoring_engine_description - metainfo: scoring_engine_metainfo - links: links **Example JSON representation of a Scoring Engine:** .. literalinclude:: samples/scoring_engine-show-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-services.inc0000664000175000017500000000367600000000000024550 0ustar00zuulzuul00000000000000.. -*- rst -*- ======== Services ======== This resource represents Watcher services, their states and the hosts they are placed on. List Service ============ .. rest_method:: GET /v1/services Returns a list of Service resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - id: service_id - name: service_name - host: service_host - status: service_status - links: links **Example JSON representation of a Service:** .. literalinclude:: samples/service-list-response.json :language: javascript List Service Detailed ===================== .. rest_method:: GET /v1/services/detail Returns a list of Service resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - id: service_id - name: service_name - host: service_host - status: service_status - last_seen_up: service_last_seen_up - links: links **Example JSON representation of a Service:** .. literalinclude:: samples/service-list-detailed-response.json :language: javascript Show Service ============ .. rest_method:: GET /v1/services/{service_ident} Shows details for a Service resource. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - service_ident: service_ident Response -------- .. rest_parameters:: parameters.yaml - id: service_id - name: service_name - host: service_host - status: service_status - last_seen_up: service_last_seen_up - links: links **Example JSON representation of a Service:** ..
literalinclude:: samples/service-show-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-strategies.inc0000664000175000017500000000636600000000000025102 0ustar00zuulzuul00000000000000.. -*- rst -*- ========== Strategies ========== A ``Strategy`` is an algorithm implementation which is able to find a ``Solution`` for a given ``Goal``. To get more information about the strategies that are shipped along with Watcher, visit the `strategies page`_. There may be several potential strategies which are able to achieve the same ``Goal``. This is why it is possible to configure which specific ``Strategy`` should be used for each goal. Some strategies may provide better optimization results but may take more time to find an optimal ``Solution``. .. _`strategies page`: https://docs.openstack.org/watcher/latest/strategies/index.html List Strategy ============= .. rest_method:: GET /v1/strategies Returns a list of Strategy resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: strategy_name - display_name: strategy_display_name - goal_name: goal_name - goal_uuid: goal_uuid - links: links **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-list-response.json :language: javascript List Strategy Detailed ====================== .. rest_method:: GET /v1/strategies/detail Returns a list of Strategy resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: strategy_name - display_name: strategy_display_name - parameters_spec: strategy_parameters_spec - goal_name: goal_name - goal_uuid: goal_uuid - links: links **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-list-detailed-response.json :language: javascript Show Strategy ============= .. rest_method:: GET /v1/strategies/{strategy_ident} Shows details for a Strategy resource. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - strategy_ident: strategy_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: strategy_name - display_name: strategy_display_name - parameters_spec: strategy_parameters_spec - goal_name: goal_name - goal_uuid: goal_uuid - links: links **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-show-response.json :language: javascript Show Strategy State =================== .. rest_method:: GET /v1/strategies/{strategy_ident}/state Retrieves information about strategy requirements. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - strategy_ident: strategy_ident Response -------- .. rest_parameters:: parameters.yaml - state: strategy_check_state - comment: strategy_check_comment - mandatory: strategy_check_mandatory - type: strategy_check_type **Example JSON representation of a Strategy state:** .. literalinclude:: samples/strategy-state-response.json :language: javascript
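The state endpoint is useful for verifying that a strategy's requirements (datasource, metrics and cluster data models) are satisfied before launching an audit. A minimal illustrative sketch, reusing the placeholder endpoint and token from earlier examples and assuming the strategy name is accepted as the identifier:

.. code-block:: python

    import requests

    # Placeholders, as in the earlier sketches.
    WATCHER = "http://controller:9322"
    HEADERS = {"X-Auth-Token": "<keystone-token>"}

    checks = requests.get(
        f"{WATCHER}/v1/strategies/workload_stabilization/state",
        headers=HEADERS).json()
    for check in checks:
        # Each entry reports a requirement type, its state and whether
        # the requirement is mandatory.
        print(check["type"], check["state"], check["mandatory"])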
literalinclude:: samples/strategy-state-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-v1-webhooks.inc0000664000175000017500000000050700000000000024544 0ustar00zuulzuul00000000000000.. -*- rst -*- ======== Webhooks ======== .. versionadded:: 1.4 Triggers an event-based Audit. Trigger EVENT Audit =================== .. rest_method:: POST /v1/webhooks/{audit_ident} Normal response codes: 202 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/api-ref/source/watcher-api-versions.inc0000664000175000017500000000400600000000000024245 0ustar00zuulzuul00000000000000.. -*- rst -*- ============ API versions ============ In order to bring new features to users over time, the Watcher API supports versioning. There are two kinds of versions in Watcher. - ``major versions``, which have dedicated URLs. - ``microversions``, which can be requested using the ``OpenStack-API-Version`` header. .. note:: The maximum microversion depends on the release. Please reference: `API Microversion History `__ for API microversion history details. The Version API resource works differently from other API resources, as it *does not* require authentication. If Watcher receives a request with an unsupported version, it responds with a 406 Not Acceptable, along with the -Min- and -Max- headers that it can support. List API versions ================= .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each major API version, as well as information about supported min and max microversions. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - description: version_description - versions: versions - version: version - id: version_id - links: links - min_version: openstack-api-min-version - max_version: openstack-api-max-version .. literalinclude:: samples/api-root-response.json :language: javascript Show v1 API =========== .. rest_method:: GET /v1/ Shows all the resources within the Watcher v1 API. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - id: version_id - links: links - OpenStack-API-Version: header_version - OpenStack-API-Minimum-Version: openstack-api-min-version - OpenStack-API-Maximum-Version: openstack-api-max-version .. 
literalinclude:: samples/api-v1-root-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/devstack/0000775000175000017500000000000000000000000016461 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/devstack/files/0000775000175000017500000000000000000000000017563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/files/apache-watcher-api.template0000664000175000017500000000274500000000000024753 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Watcher API through mod_wsgi. This version assumes you are # running devstack to configure the software. Listen %WATCHER_SERVICE_PORT% <VirtualHost *:%WATCHER_SERVICE_PORT%> WSGIDaemonProcess watcher-api user=%USER% processes=%APIWORKERS% threads=1 display-name=%{GROUP} WSGIScriptAlias / %WATCHER_WSGI_DIR%/app.wsgi WSGIApplicationGroup %{GLOBAL} WSGIProcessGroup watcher-api WSGIPassAuthorization On ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/watcher-api.log CustomLog /var/log/%APACHE_NAME%/watcher-api-access.log combined </VirtualHost> <Directory %WATCHER_WSGI_DIR%> WSGIProcessGroup watcher-api WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory> ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/devstack/lib/0000775000175000017500000000000000000000000017227 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/lib/watcher0000664000175000017500000002634100000000000020615 0ustar00zuulzuul00000000000000# lib/watcher # Functions to control the configuration and operation of the watcher services # Dependencies: # # - ``functions`` file # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # # - is_watcher_enabled # - install_watcher # - configure_watcher # - create_watcher_conf # - init_watcher # - start_watcher # - stop_watcher # - cleanup_watcher # Save trace setting _XTRACE_WATCHER=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- # Set up default directories WATCHER_REPO=${WATCHER_REPO:-${GIT_BASE}/openstack/watcher.git} WATCHER_BRANCH=${WATCHER_BRANCH:-master} WATCHER_DIR=$DEST/watcher GITREPO["python-watcherclient"]=${WATCHERCLIENT_REPO:-${GIT_BASE}/openstack/python-watcherclient.git} GITBRANCH["python-watcherclient"]=${WATCHERCLIENT_BRANCH:-master} GITDIR["python-watcherclient"]=$DEST/python-watcherclient WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher} WATCHER_CONF_DIR=/etc/watcher WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf 
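# NOTE: any of the settings above that use the ${VAR:-default} or # ${VAR:=default} expansions can be pre-seeded from local.conf before # stack.sh sources this file; a purely illustrative override (the branch # name is hypothetical): # WATCHER_BRANCH=stable/2025.1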
WATCHER_POLICY_YAML=$WATCHER_CONF_DIR/policy.yaml.sample WATCHER_DEVSTACK_DIR=$WATCHER_DIR/devstack WATCHER_DEVSTACK_FILES_DIR=$WATCHER_DEVSTACK_DIR/files if is_ssl_enabled_service "watcher" || is_service_enabled tls-proxy; then WATCHER_SERVICE_PROTOCOL="https" fi # Support entry points installation of console scripts if [[ -d $WATCHER_DIR/bin ]]; then WATCHER_BIN_DIR=$WATCHER_DIR/bin else WATCHER_BIN_DIR=$(get_python_exec_prefix) fi # There are two modes: "uwsgi", which runs uwsgi with an apache # proxy in front of it, or "mod_wsgi", which runs inside # apache. mod_wsgi is deprecated; don't use it. WATCHER_USE_WSGI_MODE=${WATCHER_USE_WSGI_MODE:-$WSGI_MODE} WATCHER_UWSGI=$WATCHER_BIN_DIR/watcher-api-wsgi WATCHER_UWSGI_CONF=$WATCHER_CONF_DIR/watcher-uwsgi.ini if is_suse; then WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/srv/www/htdocs/watcher} else WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/var/www/watcher} fi # Public facing bits WATCHER_SERVICE_HOST=${WATCHER_SERVICE_HOST:-$SERVICE_HOST} WATCHER_SERVICE_PORT=${WATCHER_SERVICE_PORT:-9322} WATCHER_SERVICE_PORT_INT=${WATCHER_SERVICE_PORT_INT:-19322} WATCHER_SERVICE_PROTOCOL=${WATCHER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST/infra-optim" else WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" fi # Entry Points # ------------ # Test if any watcher services are enabled # is_watcher_enabled function is_watcher_enabled { [[ ,${ENABLED_SERVICES} =~ ,"watcher-" ]] && return 0 return 1 } # _cleanup_watcher_apache_wsgi - Remove wsgi files, # disable and remove apache vhost file function _cleanup_watcher_apache_wsgi { sudo rm -rf $WATCHER_WSGI_DIR sudo rm -f $(apache_site_config_for watcher-api) restart_apache_server } # cleanup_watcher() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_watcher { sudo rm -rf $WATCHER_STATE_PATH if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then remove_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" else _cleanup_watcher_apache_wsgi fi } # configure_watcher() - Set config files, create data dirs, etc function configure_watcher { # Put config files in ``/etc/watcher`` for everyone to find sudo install -d -o $STACK_USER $WATCHER_CONF_DIR local project=watcher local project_uc project_uc=$(echo watcher|tr a-z A-Z) local conf_dir="${project_uc}_CONF_DIR" # eval conf dir to get the variable conf_dir="${!conf_dir}" local project_dir="${project_uc}_DIR" # eval project dir to get the variable project_dir="${!project_dir}" local sample_conf_dir="${project_dir}/etc/${project}" local sample_policy_dir="${project_dir}/etc/${project}/policy.d" local sample_policy_generator="${project_dir}/etc/${project}/oslo-policy-generator/watcher-policy-generator.conf" # first generate policy.yaml oslopolicy-sample-generator --config-file $sample_policy_generator # then optionally copy over policy.d if [[ -d $sample_policy_dir ]]; then cp -r $sample_policy_dir $conf_dir/policy.d fi # Rebuild the config file from scratch create_watcher_conf } # create_watcher_accounts() - Set up common required watcher accounts # # Project User Roles # ------------------------------------------------------------------ # SERVICE_TENANT_NAME watcher service function create_watcher_accounts { create_service_user "watcher" "admin" local watcher_service=$(get_or_create_service "watcher" \ "infra-optim" "Watcher Infrastructure Optimization Service") 
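# Point the public, admin and internal endpoint interfaces at the same # API URL, since DevStack only exposes the service on one address.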
get_or_create_endpoint $watcher_service \ "$REGION_NAME" \ "$WATCHER_API_URL"\ "$WATCHER_API_URL"\ "$WATCHER_API_URL" } # _config_watcher_apache_wsgi() - Set WSGI config files of watcher function _config_watcher_apache_wsgi { local watcher_apache_conf if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then local service_port=$WATCHER_SERVICE_PORT if is_service_enabled tls-proxy; then service_port=$WATCHER_SERVICE_PORT_INT service_protocol="http" fi sudo mkdir -p $WATCHER_WSGI_DIR sudo cp $WATCHER_DIR/watcher/api/app.wsgi $WATCHER_WSGI_DIR/app.wsgi watcher_apache_conf=$(apache_site_config_for watcher-api) sudo cp $WATCHER_DEVSTACK_FILES_DIR/apache-watcher-api.template $watcher_apache_conf sudo sed -e " s|%WATCHER_SERVICE_PORT%|$service_port|g; s|%WATCHER_WSGI_DIR%|$WATCHER_WSGI_DIR|g; s|%USER%|$STACK_USER|g; s|%APIWORKERS%|$API_WORKERS|g; s|%APACHE_NAME%|$APACHE_NAME|g; " -i $watcher_apache_conf enable_apache_site watcher-api fi } # create_watcher_conf() - Create a new watcher.conf file function create_watcher_conf { # (Re)create ``watcher.conf`` rm -f $WATCHER_CONF iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $WATCHER_CONF DEFAULT control_exchange watcher iniset_rpc_backend watcher $WATCHER_CONF iniset $WATCHER_CONF database connection $(database_connection_url watcher) iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)" if is_service_enabled tls-proxy; then iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)" iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT" # iniset $WATCHER_CONF api enable_ssl_api "True" else if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)" iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT" fi fi iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2" configure_keystone_authtoken_middleware $WATCHER_CONF watcher configure_keystone_authtoken_middleware $WATCHER_CONF watcher "watcher_clients_auth" if is_fedora || is_suse; then # watcher defaults to /usr/local/bin, but fedora and suse pip like to # install things in /usr/bin iniset $WATCHER_CONF DEFAULT bindir "/usr/bin" fi if [ -n "$WATCHER_STATE_PATH" ]; then iniset $WATCHER_CONF DEFAULT state_path "$WATCHER_STATE_PATH" iniset $WATCHER_CONF oslo_concurrency lock_path "$WATCHER_STATE_PATH" fi if [ "$SYSLOG" != "False" ]; then iniset $WATCHER_CONF DEFAULT use_syslog "True" fi # Format logging setup_logging $WATCHER_CONF #config apache files if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim" else _config_watcher_apache_wsgi fi # Register SSL certificates if provided if is_ssl_enabled_service watcher; then ensure_certificates WATCHER iniset $WATCHER_CONF DEFAULT ssl_cert_file "$WATCHER_SSL_CERT" iniset $WATCHER_CONF DEFAULT ssl_key_file "$WATCHER_SSL_KEY" iniset $WATCHER_CONF DEFAULT enabled_ssl_apis "$WATCHER_ENABLED_APIS" fi } # init_watcher() - Initialize databases, etc. 
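# (Re)creates the watcher database and runs the schema migrations with # watcher-db-manage; this only happens when a database backend and the # watcher-api service are both enabled.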
function init_watcher { # clean up from previous (possibly aborted) runs # create required data files if is_service_enabled $DATABASE_BACKENDS && is_service_enabled watcher-api; then # (Re)create watcher database recreate_database watcher # Create watcher schema $WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade fi } # install_watcherclient() - Collect source and prepare function install_watcherclient { if use_library_from_git "python-watcherclient"; then git_clone_by_name "python-watcherclient" setup_dev_lib "python-watcherclient" fi if [[ "$GLOBAL_VENV" == "True" ]]; then sudo ln -sf /opt/stack/data/venv/bin/watcher /usr/local/bin fi } # install_watcher() - Collect source and prepare function install_watcher { git_clone $WATCHER_REPO $WATCHER_DIR $WATCHER_BRANCH setup_develop $WATCHER_DIR if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then install_apache_wsgi fi } # start_watcher_api() - Start the API process ahead of other things function start_watcher_api { # Get right service port for testing local service_port=$WATCHER_SERVICE_PORT local service_protocol=$WATCHER_SERVICE_PROTOCOL local watcher_url if is_service_enabled tls-proxy; then service_port=$WATCHER_SERVICE_PORT_INT service_protocol="http" fi if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then run_process "watcher-api" "$(which uwsgi) --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF" watcher_url=$service_protocol://$SERVICE_HOST/infra-optim else watcher_url=$service_protocol://$SERVICE_HOST:$service_port enable_apache_site watcher-api restart_apache_server # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy watcher '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT fi fi echo "Waiting for watcher-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $watcher_url; then die $LINENO "watcher-api did not start" fi } # start_watcher() - Start running processes, including screen function start_watcher { # ``run_process`` checks ``is_service_enabled``, it is not needed here start_watcher_api run_process watcher-decision-engine "$WATCHER_BIN_DIR/watcher-decision-engine --config-file $WATCHER_CONF" run_process watcher-applier "$WATCHER_BIN_DIR/watcher-applier --config-file $WATCHER_CONF" } # stop_watcher() - Stop running processes (non-screen) function stop_watcher { if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then stop_process watcher-api else disable_apache_site watcher-api restart_apache_server fi for serv in watcher-decision-engine watcher-applier; do stop_process $serv done } # Restore xtrace $_XTRACE_WATCHER # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/local.conf.compute0000664000175000017500000000322100000000000022073 0ustar00zuulzuul00000000000000# Sample ``local.conf`` for compute node for Watcher development # NOTE: Copy this file to the root DevStack directory for it to work properly. 
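# Everything in the [[local|localrc]] section below ends up in DevStack's # generated localrc; the addresses and ranges shown are examples only and # must be adjusted to match your own topology.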
[[local|localrc]] ADMIN_PASSWORD=nomoresecrete DATABASE_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=azertytoken HOST_IP=192.168.42.2 # Change this to this compute node's IP address #HOST_IPV6=2001:db8::7 FLAT_INTERFACE=eth0 FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is NETWORK_GATEWAY=10.254.1.1 # Change this for your network MULTI_HOST=1 SERVICE_HOST=192.168.42.1 # Change this to the IP of your controller node MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=${SERVICE_HOST}:9292 DATABASE_TYPE=mysql # Enable services (including neutron) ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" VNCSERVER_LISTEN=0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP # or HOST_IPV6 NOVA_INSTANCES_PATH=/opt/stack/data/instances # Enable the Ceilometer plugin for the compute agent enable_plugin ceilometer https://opendev.org/openstack/ceilometer disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api LOGFILE=$DEST/logs/stack.sh.log LOGDAYS=2 [[post-config|$NOVA_CONF]] [DEFAULT] compute_monitors=cpu.virt_driver [notifications] # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. We # can change this to just versioned when ceilometer handles versioned # notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449 notification_format=both ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/local.conf.controller0000664000175000017500000000334300000000000022607 0ustar00zuulzuul00000000000000# Sample ``local.conf`` for controller node for Watcher development # NOTE: Copy this file to the root DevStack directory for it to work properly. [[local|localrc]] ADMIN_PASSWORD=nomoresecrete DATABASE_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=azertytoken HOST_IP=192.168.42.1 # Change this to your controller node IP address #HOST_IPV6=2001:db8::7 FLAT_INTERFACE=eth0 FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is NETWORK_GATEWAY=10.254.1.1 # Change this for your network MULTI_HOST=1 # Set this to FALSE if you do not want to run watcher-api behind mod-wsgi #WATCHER_USE_MOD_WSGI=TRUE # This is the controller node, so disable nova-compute disable_service n-cpu # Enable the Watcher Dashboard plugin enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard # Enable the Watcher plugin enable_plugin watcher https://opendev.org/openstack/watcher # Enable the Ceilometer plugin enable_plugin ceilometer https://opendev.org/openstack/ceilometer # This is the controller node, so disable the ceilometer compute agent disable_service ceilometer-acompute # Enable the ceilometer api explicitly (bug: 1667678) enable_service ceilometer-api # Enable the Gnocchi plugin enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi LOGFILE=$DEST/logs/stack.sh.log LOGDAYS=2 [[post-config|$NOVA_CONF]] [DEFAULT] compute_monitors=cpu.virt_driver [notifications] # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. 
We # can change this to just versioned when ceilometer handles versioned # notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449 notification_format=both ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/override-defaults0000664000175000017500000000063200000000000022031 0ustar00zuulzuul00000000000000# Plug-in overrides # https://docs.openstack.org/devstack/latest/plugins.html#plugin-interface # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. We # can change this to just versioned when ceilometer handles # versioned notifications from nova: # https://bugs.launchpad.net/ceilometer/+bug/1665449 NOVA_NOTIFICATION_FORMAT=both././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/plugin.sh0000664000175000017500000000247000000000000020316 0ustar00zuulzuul00000000000000# plugin.sh - DevStack plugin script to install watcher # Save trace setting _XTRACE_WATCHER_PLUGIN=$(set +o | grep xtrace) set -o xtrace echo_summary "watcher's plugin.sh was called..." . $DEST/watcher/devstack/lib/watcher # Show all of defined environment variables (set -o posix; set) if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then echo_summary "Before Installing watcher" elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing watcher" install_watcher LIBS_FROM_GIT="${LIBS_FROM_GIT},python-watcherclient" install_watcherclient cleanup_watcher elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring watcher" configure_watcher if is_service_enabled key; then create_watcher_accounts fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize watcher init_watcher # Start the watcher components echo_summary "Starting watcher" start_watcher fi if [[ "$1" == "unstack" ]]; then stop_watcher fi if [[ "$1" == "clean" ]]; then cleanup_watcher fi fi # Restore xtrace $_XTRACE_WATCHER_PLUGIN ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/settings0000664000175000017500000000037000000000000020244 0ustar00zuulzuul00000000000000# DevStack settings # Make sure rabbit is enabled enable_service rabbit # Make sure mysql is enabled enable_service mysql # Enable Watcher services enable_service watcher-api enable_service watcher-decision-engine enable_service watcher-applier ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/devstack/upgrade/0000775000175000017500000000000000000000000020110 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/devstack/upgrade/from_rocky/0000775000175000017500000000000000000000000022262 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/upgrade/from_rocky/upgrade-watcher0000664000175000017500000000061500000000000025271 0ustar00zuulzuul00000000000000# ``upgrade-watcher`` function configure_watcher_upgrade { XTRACE=$(set +o | grep xtrace) set -o xtrace # Copy release-specific files sudo cp 
$TARGET_RELEASE_DIR/watcher/etc/watcher/watcher.conf $WATCHER_CONF_DIR/watcher.conf sudo cp $TARGET_RELEASE_DIR/watcher/etc/watcher/policy.yaml.sample $WATCHER_CONF_DIR/policy.yaml.sample # reset to previous state $XTRACE } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/upgrade/resources.sh0000775000175000017500000000636400000000000022472 0ustar00zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin demo set -o xtrace function _wait_for_status { while : do state=$("${@:2}" -f value -c State) [[ $state == "SUCCEEDED" ]] && break if [ $state == "ERROR" ]; then die $LINENO "ERROR creating audit" fi sleep 10 done } function create_audit_template { at_id=$(openstack optimize audittemplate create d1 dummy -s dummy -f value -c UUID) resource_save watcher at_id $at_id } function create_audit { audit_id=$(openstack optimize audit create -s dummy -g dummy -f value -c UUID) resource_save watcher audit_id $audit_id } function create_audit_with_autotrigger { audit_at_id=$(openstack optimize audit create -s dummy -g dummy -f value -c UUID --auto-trigger) resource_save watcher audit_at_id $audit_at_id } function verify_audit_template { local at_id=$(resource_get watcher at_id) openstack optimize audittemplate show $at_id } function verify_audit_with_autotrigger { local audit_at_id=$(resource_get watcher audit_at_id) _wait_for_status "SUCCEEDED" openstack optimize audit show $audit_at_id local actionplan_at_id=$(openstack optimize actionplan list --audit $audit_at_id -c UUID -f value) resource_save watcher actionplan_at $actionplan_at_id actionplan_at_state=$(openstack optimize actionplan show $actionplan_at_id -c State -f value) if [ $actionplan_at_state != "SUCCEEDED" ]; then die $LINENO "ERROR executing actionplan" fi } function verify_audit { local audit_id=$(resource_get watcher audit_id) _wait_for_status "SUCCEEDED" openstack optimize audit show $audit_id local actionplan_id=$(openstack optimize actionplan list --audit $audit_id -c UUID -f value) resource_save watcher actionplan $actionplan_id actionplan_state=$(openstack optimize actionplan show $actionplan_id -c State -f value) if [ $actionplan_state != "RECOMMENDED" ]; then die $LINENO "ERROR creating actionplan" fi } function verify_noapi { # currently no good way : } function delete_audit { local audit_id=$(resource_get watcher audit_id) local actionplan_id=$(resource_get watcher actionplan) watcher actionplan delete $actionplan_id openstack optimize audit delete $audit_id } function delete_audit_with_autotrigger { local audit_at_id=$(resource_get watcher audit_at_id) local actionplan_id=$(resource_get watcher actionplan_at) watcher actionplan delete $actionplan_id openstack optimize audit delete $audit_at_id } function delete_audit_template { local at_id=$(resource_get watcher at_id) openstack optimize audittemplate delete $at_id } function create { create_audit_template create_audit create_audit_with_autotrigger } function verify { verify_audit_template verify_audit verify_audit_with_autotrigger } function destroy { delete_audit_template delete_audit delete_audit_with_autotrigger } # Dispatcher case $1 in "create") create ;; "verify_noapi") verify_noapi ;; "verify") verify ;; "destroy") destroy ;; "force_destroy") set +o errexit destroy ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/devstack/upgrade/settings0000664000175000017500000000146200000000000021676 0ustar00zuulzuul00000000000000register_project_for_upgrade watcher register_db_to_save watcher devstack_localrc base enable_plugin watcher https://opendev.org/openstack/watcher $BASE_DEVSTACK_BRANCH devstack_localrc target enable_plugin watcher https://opendev.org/openstack/watcher devstack_localrc base enable_service watcher-api watcher-decision-engine watcher-applier devstack_localrc target enable_service watcher-api watcher-decision-engine watcher-applier BASE_RUN_SMOKE=False TARGET_RUN_SMOKE=False # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. We # can change this to just versioned when ceilometer handles # versioned notifications from nova: # https://bugs.launchpad.net/ceilometer/+bug/1665449 devstack_localrc base NOVA_NOTIFICATION_FORMAT=both ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/upgrade/shutdown.sh0000775000175000017500000000115400000000000022323 0ustar00zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions # We need base DevStack functions for this source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache WATCHER_DEVSTACK_DIR=$(dirname $(dirname $0)) source $WATCHER_DEVSTACK_DIR/settings source $WATCHER_DEVSTACK_DIR/plugin.sh source $WATCHER_DEVSTACK_DIR/lib/watcher set -o xtrace stop_watcher # sanity check that the services are actually down ensure_services_stopped watcher-api watcher-decision-engine watcher-applier ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/devstack/upgrade/upgrade.sh0000775000175000017500000000470600000000000022105 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-watcher`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "********************************************************************" echo "ERROR: Abort $0" echo "********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade watcher # ============ # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache source $TARGET_DEVSTACK_DIR/lib/tls source $TARGET_DEVSTACK_DIR/lib/keystone source $TOP_DIR/openrc admin admin source $(dirname $(dirname $BASH_SOURCE))/settings source $(dirname $(dirname $BASH_SOURCE))/plugin.sh # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following along as the install occurs. 
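# The rest of this script follows the standard grenade flow: save the old # config files, install the target Watcher, run the per-release upgrade # hook, migrate the database, then restart and verify the services.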
set -o xtrace # Save current config files for posterity [[ -d $SAVE_DIR/etc.watcher ]] || cp -pr $WATCHER_CONF_DIR $SAVE_DIR/etc.watcher # Install the target watcher install_watcher # calls upgrade-watcher for the specific release upgrade_project watcher $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH if [[ ! -f "$WATCHER_UWSGI_CONF" ]] && [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]] then write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim" endpoints=$(openstack endpoint list --service watcher -c ID -f value) for id in $endpoints; do openstack endpoint delete $id done create_watcher_accounts fi # Migrate the database $WATCHER_BIN_DIR/watcher-db-manage upgrade || die $LINENO "DB migration error" start_watcher # Don't succeed unless the services come up ensure_services_started watcher-api watcher-decision-engine watcher-applier set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/0000775000175000017500000000000000000000000015422 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/dictionary.txt0000664000175000017500000000003300000000000020324 0ustar00zuulzuul00000000000000thirdparty assertin notin ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/ext/0000775000175000017500000000000000000000000016222 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/ext/__init__.py0000664000175000017500000000000000000000000020321 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/ext/term.py0000664000175000017500000001231500000000000017545 0ustar00zuulzuul00000000000000# Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
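"""Custom Sphinx directives (``watcher-term`` and ``watcher-func``) used to pull docstrings and function return values from the Watcher source tree into the rendered documentation."""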
import importlib import inspect from docutils import nodes from docutils.parsers import rst from docutils import statemachine from watcher.version import version_string class BaseWatcherDirective(rst.Directive): def __init__(self, name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): super(BaseWatcherDirective, self).__init__( name, arguments, options, content, lineno, content_offset, block_text, state, state_machine) self.result = statemachine.ViewList() def run(self): raise NotImplementedError('Must override run() in subclass.') def add_line(self, line, *lineno): """Append one line of generated reST to the output.""" self.result.append(line, rst.directives.unchanged, *lineno) def add_textblock(self, textblock): for line in textblock.splitlines(): self.add_line(line) def add_object_docstring(self, obj): obj_raw_docstring = obj.__doc__ or "" # Maybe it's within the __init__ if not obj_raw_docstring and hasattr(obj, "__init__"): if obj.__init__.__doc__: obj_raw_docstring = obj.__init__.__doc__ if not obj_raw_docstring: # Raise a warning to make the tests fail with doc8 raise self.error("No docstring available for %s!" % obj) obj_docstring = inspect.cleandoc(obj_raw_docstring) self.add_textblock(obj_docstring) class WatcherTerm(BaseWatcherDirective): """Directive to import an RST-formatted docstring into the Watcher glossary **How to use it** # inside your .py file class DocumentedObject(object): '''My *.rst* docstring''' # Inside your .rst file .. watcher-term:: import.path.to.your.DocumentedObject This directive will then import the docstring and then interpret it. """ # You need to put an import path as an argument for this directive to work required_arguments = 1 def run(self): cls_path = self.arguments[0] try: try: cls = importlib.import_module(cls_path) except ImportError: module_name, cls_name = cls_path.rsplit('.', 1) mod = importlib.import_module(module_name) cls = getattr(mod, cls_name) except Exception as exc: raise self.error(exc) self.add_object_docstring(cls) node = nodes.paragraph() node.document = self.state.document self.state.nested_parse(self.result, 0, node) return node.children class WatcherFunc(BaseWatcherDirective): """Directive to import a value returned by a func into the Watcher doc **How to use it** # inside your .py file class Bar(object): def foo(self): return foo_string # Inside your .rst file .. watcher-func:: import.path.to.your.Bar.foo node_classname node_classname is documented here: http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html This directive will then import the value and then interpret it. """ # You need to put an import path as an argument for this directive to work # required_arguments = 1 # optional_arguments = 1 option_spec = {'format': rst.directives.unchanged} has_content = True def run(self): if not self.content: error = self.state_machine.reporter.error( 'The "%s" directive is empty; content required.' 
% self.name, nodes.literal_block(self.block_text, self.block_text), line=self.lineno) return [error] func_path = self.content[0] try: cls_path, func_name = func_path.rsplit('.', 1) module_name, cls_name = cls_path.rsplit('.', 1) mod = importlib.import_module(module_name) cls = getattr(mod, cls_name) except Exception as exc: raise self.error(exc) cls_obj = cls() func = getattr(cls_obj, func_name) textblock = func() if not isinstance(textblock, str): textblock = str(textblock) self.add_textblock(textblock) try: node_class = getattr(nodes, self.options.get('format', 'paragraph')) except Exception as exc: raise self.error(exc) node = node_class() node.document = self.state.document self.state.nested_parse(self.result, 0, node) return [node] def setup(app): app.add_directive('watcher-term', WatcherTerm) app.add_directive('watcher-func', WatcherFunc) return {'version': version_string} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/ext/versioned_notifications.py0000664000175000017500000001022300000000000023521 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to list the implemented versioned notifications in the developer documentation. It is used via a single directive in the .rst file .. 
versioned_notifications:: """ from docutils.parsers.rst import Directive from docutils import nodes from watcher.notifications import base as notification from watcher.objects import base class VersionedNotificationDirective(Directive): SAMPLE_ROOT = 'doc/notification_samples/' TOGGLE_SCRIPT = """ <script> jQuery(document).ready(function(){ jQuery('#%s-div').toggle('show'); jQuery('#%s-hideshow').on('click', function(event) { jQuery('#%s-div').toggle('show'); }); }); </script> """ def run(self): notifications = self._collect_notifications() return self._build_markup(notifications) def _collect_notifications(self): base.WatcherObjectRegistry.register_notification_objects() notifications = [] ovos = base.WatcherObjectRegistry.obj_classes() for name, cls in ovos.items(): cls = cls[0] if (issubclass(cls, notification.NotificationBase) and cls != notification.NotificationBase): payload_name = cls.fields['payload'].objname payload_cls = ovos[payload_name][0] for sample in cls.samples: notifications.append((cls.__name__, payload_cls.__name__, sample)) return sorted(notifications) def _build_markup(self, notifications): content = [] cols = ['Event type', 'Notification class', 'Payload class', 'Sample'] table = nodes.table() content.append(table) group = nodes.tgroup(cols=len(cols)) table.append(group) head = nodes.thead() group.append(head) for _ in cols: group.append(nodes.colspec(colwidth=1)) body = nodes.tbody() group.append(body) # fill the table header row = nodes.row() body.append(row) for col_name in cols: col = nodes.entry() row.append(col) text = nodes.strong(text=col_name) col.append(text) # fill the table content, one notification per row for name, payload, sample_file in notifications: event_type = sample_file[0: -5].replace('-', '.') row = nodes.row() body.append(row) col = nodes.entry() row.append(col) text = nodes.literal(text=event_type) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=name) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=payload) col.append(text) col = nodes.entry() row.append(col) with open(self.SAMPLE_ROOT + sample_file, 'r') as f: sample_content = f.read() event_type = sample_file[0: -5] html_str = self.TOGGLE_SCRIPT % ((event_type, ) * 3) html_str += ("<input type='button' id='%s-hideshow' value='hide/show sample'>" % event_type) html_str += ("<div id='%s-div'><pre>%s</pre></div>
" % (event_type, sample_content)) raw = nodes.raw('', html_str, format="html") col.append(raw) return content def setup(app): app.add_directive('versioned_notifications', VersionedNotificationDirective) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/notification_samples/0000775000175000017500000000000000000000000021634 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-cancel-end.json0000664000175000017500000000241600000000000025776 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCancelPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.cancel.end", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-cancel-error.json0000664000175000017500000000325400000000000026362 0ustar00zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCancelPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "module_name": "watcher.tests.notifications.test_action_notification", "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_action_cancel_with_error" } }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "FAILED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.cancel.error", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/doc/notification_samples/action-cancel-start.json0000664000175000017500000000242100000000000026361 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCancelPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.cancel.start", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-create.json0000664000175000017500000000236000000000000025246 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCreatePayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "PENDING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "event_type": "action.create", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-delete.json0000664000175000017500000000236000000000000025245 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionDeletePayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "DELETED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": 
[], "action_type": "nop", "deleted_at": null } }, "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "event_type": "action.delete", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-execution-end.json0000664000175000017500000000242100000000000026550 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionExecutionPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "SUCCEEDED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.execution.end", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-execution-error.json0000664000175000017500000000326100000000000027136 0ustar00zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionExecutionPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "module_name": "watcher.tests.notifications.test_action_notification", "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_action_execution_with_error" } }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "FAILED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy":[], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.execution.error", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-execution-start.json0000664000175000017500000000242100000000000027137 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", 
"watcher_object.name": "ActionExecutionPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.execution.start", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action-update.json0000664000175000017500000000301600000000000025264 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionUpdatePayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionStateUpdatePayload", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" } }, "state": "ONGOING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.update", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-cancel-end.json0000664000175000017500000000352100000000000027006 0ustar00zuulzuul00000000000000{ "event_type": "action_plan.cancel.end", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "SUCCEEDED", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", 
"fault": null, "state": "CANCELLED", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-cancel-error.json0000664000175000017500000000437500000000000027401 0ustar00zuulzuul00000000000000{ "event_type": "action_plan.cancel.error", "publisher_id": "infra-optim:node0", "priority": "ERROR", "message_id": "9a45c5ae-0e21-4300-8fa0-5555d52a66d9", "payload": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.data": { "fault": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "exception_message": "TEST", "module_name": "watcher.tests.notifications.test_action_plan_notification", "function_name": "test_send_action_plan_cancel_with_error", "exception": "WatcherException" } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "created_at": "2016-10-18T09:52:05Z", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "created_at": "2016-10-18T09:52:05Z", "name": "TEST", "updated_at": null, "display_name": "test strategy", "parameters_spec": {}, "deleted_at": null } }, "updated_at": null, "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.data": { "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "created_at": "2016-10-18T09:52:05Z", "scope": [], "updated_at": null, "audit_type": "ONESHOT", "interval": null, "deleted_at": null, "state": "SUCCEEDED" } }, "global_efficacy": [], "state": "CANCELLING" } }, "timestamp": "2016-10-18 09:52:05.219414" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-cancel-start.json0000664000175000017500000000352400000000000027400 0ustar00zuulzuul00000000000000{ "event_type": "action_plan.cancel.start", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, 
"uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "SUCCEEDED", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "CANCELLING", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-create.json0000664000175000017500000000350400000000000026261 0ustar00zuulzuul00000000000000{ "publisher_id": "infra-optim:node0", "payload": { "watcher_object.version": "1.0", "watcher_object.data": { "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "display_name": "test strategy", "name": "TEST", "updated_at": null, "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "deleted_at": null }, "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload" }, "created_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "scope": [], "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "parameters": {}, "interval": null, "deleted_at": null, "state": "PENDING", "created_at": "2016-10-18T09:52:05Z", "updated_at": null }, "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "deleted_at": null, "state": "RECOMMENDED", "updated_at": null }, "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCreatePayload" }, "priority": "INFO", "message_id": "5148bff1-ea06-4ad6-8e4e-8c85ca5eb629", "event_type": "action_plan.create", "timestamp": "2016-10-18 09:52:05.219414" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-delete.json0000664000175000017500000000352200000000000026260 0ustar00zuulzuul00000000000000{ "publisher_id": "infra-optim:node0", "timestamp": "2016-10-18 09:52:05.219414", "payload": { "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "created_at": "2016-10-18T09:52:05Z", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "interval": null, "audit_type": "ONESHOT", "scope": [], "updated_at": null, "deleted_at": null, 
"state": "PENDING", "created_at": "2016-10-18T09:52:05Z", "parameters": {} }, "watcher_object.version": "1.0", "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher" }, "global_efficacy": {}, "updated_at": null, "deleted_at": null, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "created_at": "2016-10-18T09:52:05Z", "name": "TEST", "display_name": "test strategy", "deleted_at": null, "updated_at": null, "parameters_spec": {} }, "watcher_object.version": "1.0", "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher" }, "state": "DELETED" }, "watcher_object.version": "1.0", "watcher_object.name": "ActionPlanDeletePayload", "watcher_object.namespace": "watcher" }, "event_type": "action_plan.delete", "message_id": "3d137686-a1fd-4683-ab40-c4210aac2140", "priority": "INFO" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-execution-end.json0000664000175000017500000000356000000000000027567 0ustar00zuulzuul00000000000000{ "event_type": "action_plan.execution.end", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanActionPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "SUCCEEDED", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "ONGOING", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-execution-error.json0000664000175000017500000000443100000000000030150 0ustar00zuulzuul00000000000000{ "event_type": "action_plan.execution.error", "publisher_id": "infra-optim:node0", "priority": "ERROR", "message_id": "9a45c5ae-0e21-4300-8fa0-5555d52a66d9", "payload": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanActionPayload", "watcher_object.data": { "fault": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "exception_message": "TEST", "module_name": 
"watcher.tests.notifications.test_action_plan_notification", "function_name": "test_send_action_plan_action_with_error", "exception": "WatcherException" } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "created_at": "2016-10-18T09:52:05Z", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "created_at": "2016-10-18T09:52:05Z", "name": "TEST", "updated_at": null, "display_name": "test strategy", "parameters_spec": {}, "deleted_at": null } }, "updated_at": null, "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.data": { "parameters": {}, "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "created_at": "2016-10-18T09:52:05Z", "scope": [], "updated_at": null, "audit_type": "ONESHOT", "interval": null, "deleted_at": null, "state": "PENDING" } }, "global_efficacy": [], "state": "ONGOING" } }, "timestamp": "2016-10-18 09:52:05.219414" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-execution-start.json0000664000175000017500000000356000000000000030156 0ustar00zuulzuul00000000000000{ "event_type": "action_plan.execution.start", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanActionPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "PENDING", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "ONGOING", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/action_plan-update.json0000664000175000017500000000416400000000000026303 0ustar00zuulzuul00000000000000{ "payload": { "watcher_object.version": "1.0", "watcher_object.data": { "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { 
"watcher_object.version": "1.0", "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "scope": [], "created_at": "2016-10-18T09:52:05Z", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "interval": null, "updated_at": null, "state": "PENDING", "deleted_at": null, "parameters": {} }, "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload" }, "created_at": "2016-10-18T09:52:05Z", "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "updated_at": null, "state_update": { "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanStateUpdatePayload" }, "state": "ONGOING", "deleted_at": null, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.data": { "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "display_name": "test strategy", "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "deleted_at": null, "parameters_spec": {} }, "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload" }, "global_efficacy": {} }, "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanUpdatePayload" }, "publisher_id": "infra-optim:node0", "priority": "INFO", "timestamp": "2016-10-18 09:52:05.219414", "event_type": "action_plan.update", "message_id": "0a8a7329-fd5a-4ec6-97d7-2b776ce51a4c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-create.json0000664000175000017500000000433500000000000025103 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "PENDING", "updated_at": null, "deleted_at": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditCreatePayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.create", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-delete.json0000664000175000017500000000433500000000000025102 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "DELETED", "updated_at": null, "deleted_at": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditDeletePayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.delete", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-planner-end.json0000664000175000017500000000436700000000000026050 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": 
"4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.planner.end", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-planner-error.json0000664000175000017500000000522600000000000026426 0ustar00zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_audit_action_with_error", "module_name": "watcher.tests.notifications.test_audit_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.planner.error", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-planner-start.json0000664000175000017500000000437100000000000026432 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], 
"strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.planner.start", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-strategy-end.json0000664000175000017500000000437000000000000026245 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.strategy.end", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-strategy-error.json0000664000175000017500000000522700000000000026632 0ustar00zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": 
"TEST", "function_name": "test_send_audit_action_with_error", "module_name": "watcher.tests.notifications.test_audit_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.strategy.error", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-strategy-start.json0000664000175000017500000000437200000000000026636 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", 
"timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.strategy.start", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/audit-update.json0000664000175000017500000000477200000000000025127 0ustar00zuulzuul00000000000000{ "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:51:38.722986 ", "payload": { "watcher_object.name": "AuditUpdatePayload", "watcher_object.data": { "name": "my_audit", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.name": "StrategyPayload", "watcher_object.data": { "name": "dummy", "parameters_spec": { "properties": { "para2": { "default": "hello", "type": "string", "description": "string parameter example" }, "para1": { "maximum": 10.2, "default": 3.2, "minimum": 1.0, "description": "number parameter example", "type": "number" } } }, "updated_at": null, "display_name": "Dummy strategy", "deleted_at": null, "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "created_at": "2016-11-04T16:25:35Z" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "scope": [], "created_at": "2016-11-04T16:51:21Z", "uuid": "f1e0d912-afd9-4bf2-91ef-c99cd08cc1ef", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.name": "GoalPayload", "watcher_object.data": { "efficacy_specification": [], "updated_at": null, "name": "dummy", "display_name": "Dummy goal", "deleted_at": null, "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "created_at": "2016-11-04T16:25:35Z" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "parameters": { "para2": "hello", "para1": 3.2 }, "deleted_at": null, "state_update": { "watcher_object.name": "AuditStateUpdatePayload", "watcher_object.data": { "state": "ONGOING", "old_state": "PENDING" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "interval": null, "updated_at": null, "state": "ONGOING", "audit_type": "ONESHOT" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "priority": "INFO", "event_type": "audit.update", "message_id": "697fdf55-7252-4b6c-a2c2-5b9e85f6342c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/infra-optim-exception.json0000664000175000017500000000112100000000000026743 0ustar00zuulzuul00000000000000{ "event_type": "infra-optim.exception", "payload": { "watcher_object.data": { "exception": "NoAvailableStrategyForGoal", "exception_message": "No strategy could be found to achieve the server_consolidation goal.", "function_name": "_aggregate_create_in_db", "module_name": "watcher.objects.aggregate" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "priority": "ERROR", "publisher_id": "watcher-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/notification_samples/service-update.json0000664000175000017500000000161400000000000025451 0ustar00zuulzuul00000000000000{ "payload": { "watcher_object.name": "ServiceUpdatePayload", "watcher_object.namespace": "watcher", "watcher_object.data": { "status_update": { "watcher_object.name": "ServiceStatusUpdatePayload", "watcher_object.namespace": "watcher", "watcher_object.data": { "old_state": 
"ACTIVE", "state": "FAILED" }, "watcher_object.version": "1.0" }, "last_seen_up": "2016-09-22T08:32:06Z", "name": "watcher-service", "sevice_host": "controller" }, "watcher_object.version": "1.0" }, "event_type": "service.update", "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/requirements.txt0000664000175000017500000000041200000000000020703 0ustar00zuulzuul00000000000000sphinx>=2.1.1 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0 sphinxcontrib-apidoc>=0.2.0 # BSD # openstack os-api-ref>=1.4.0 # Apache-2.0 openstackdocstheme>=2.2.1 # Apache-2.0 # releasenotes reno>=3.1.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/source/0000775000175000017500000000000000000000000016722 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/source/_static/0000775000175000017500000000000000000000000020350 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/_static/.placeholder0000664000175000017500000000000000000000000022621 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/source/admin/0000775000175000017500000000000000000000000020012 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/admin/apache-mod-wsgi.rst0000664000175000017500000000260700000000000023516 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ Installing API behind mod_wsgi ============================== #. Install the Apache Service:: Fedora 21/RHEL7/CentOS7: sudo yum install httpd Fedora 22 (or higher): sudo dnf install httpd Debian/Ubuntu: apt-get install apache2 #. Copy ``etc/apache2/watcher.conf`` under the apache sites:: Fedora/RHEL7/CentOS7: sudo cp etc/apache2/watcher /etc/httpd/conf.d/watcher.conf Debian/Ubuntu: sudo cp etc/apache2/watcher /etc/apache2/sites-available/watcher.conf #. Edit ``/watcher.conf`` according to installation and environment. * Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group`` values to appropriate user on your server. * Modify the ``WSGIScriptAlias`` directive to point to the watcher/api/app.wsgi script. * Modify the ``Directory`` directive to set the path to the Watcher API code. * Modify the ``ErrorLog and CustomLog`` to redirect the logs to the right directory. #. Enable the apache watcher site and reload:: Fedora/RHEL7/CentOS7: sudo systemctl reload httpd Debian/Ubuntu: sudo a2ensite watcher sudo service apache2 reload ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/admin/gmr.rst0000664000175000017500000000323700000000000021336 0ustar00zuulzuul00000000000000.. 
Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher_gmr: ======================= Guru Meditation Reports ======================= Watcher contains a mechanism whereby developers and system administrators can generate a report about the state of a running Watcher service. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ================ A *GMR* can be generated by sending the *USR2* signal to any Watcher process with support (see below). The *GMR* will then be outputted as standard error for that particular process. For example, suppose that ``watcher-api`` has process id ``8675``, and was run with ``2>/var/log/watcher/watcher-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/watcher/watcher-api-err.log``. Structure of a GMR ================== The *GMR* is designed to be extensible; any particular service may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information. Threads Shows stack traces and thread ids for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. Plugins Lists all the plugins currently accessible by the Watcher service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/admin/index.rst0000664000175000017500000000035400000000000021655 0ustar00zuulzuul00000000000000=================== Administrator Guide =================== .. toctree:: :maxdepth: 2 apache-mod-wsgi gmr policy ../strategies/index ../datasources/index ../contributor/notifications ../contributor/concurrency ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/admin/policy.rst0000664000175000017500000001101700000000000022043 0ustar00zuulzuul00000000000000.. Copyright 2016 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Policies ======== .. warning:: JSON formatted policy file is deprecated since Watcher 6.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html Watcher's public API calls may be restricted to certain sets of users using a policy configuration file. This document explains exactly how policies are configured and what they apply to. A policy is composed of a set of rules that are used in determining if a particular action may be performed by the authorized tenant. 
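For example, a single rule maps an API action to the condition under which it is allowed. The snippet below is a minimal illustrative ``policy.yaml`` entry in the YAML format recommended by the deprecation warning above (a JSON example appears at the end of the next section)::

    # Only users holding the admin role may create audits
    "audit:create": "role:admin"
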
Constructing a Policy Configuration File ---------------------------------------- A policy configuration file is simply a JSON object that contains sets of rules. Each top-level key is the name of a rule. Each rule is a string that describes an action that may be performed in the Watcher API. The actions that may have a rule enforced on them are: * ``strategy:get_all``, ``strategy:detail`` - List available strategies * ``GET /v1/strategies`` * ``GET /v1/strategies/detail`` * ``strategy:get`` - Retrieve a specific strategy entity * ``GET /v1/strategies/`` * ``GET /v1/strategies/`` * ``goal:get_all``, ``goal:detail`` - List available goals * ``GET /v1/goals`` * ``GET /v1/goals/detail`` * ``goal:get`` - Retrieve a specific goal entity * ``GET /v1/goals/`` * ``GET /v1/goals/`` * ``audit_template:get_all``, ``audit_template:detail`` - List available audit templates * ``GET /v1/audit_templates`` * ``GET /v1/audit_templates/detail`` * ``audit_template:get`` - Retrieve a specific audit template entity * ``GET /v1/audit_templates/`` * ``GET /v1/audit_templates/`` * ``audit_template:create`` - Create an audit template entity * ``POST /v1/audit_templates`` * ``audit_template:delete`` - Delete an audit template entity * ``DELETE /v1/audit_templates/`` * ``DELETE /v1/audit_templates/`` * ``audit_template:update`` - Update an audit template entity * ``PATCH /v1/audit_templates/`` * ``PATCH /v1/audit_templates/`` * ``audit:get_all``, ``audit:detail`` - List available audits * ``GET /v1/audits`` * ``GET /v1/audits/detail`` * ``audit:get`` - Retrieve a specific audit entity * ``GET /v1/audits/`` * ``audit:create`` - Create an audit entity * ``POST /v1/audits`` * ``audit:delete`` - Delete an audit entity * ``DELETE /v1/audits/`` * ``audit:update`` - Update an audit entity * ``PATCH /v1/audits/`` * ``action_plan:get_all``, ``action_plan:detail`` - List available action plans * ``GET /v1/action_plans`` * ``GET /v1/action_plans/detail`` * ``action_plan:get`` - Retrieve a specific action plan entity * ``GET /v1/action_plans/`` * ``action_plan:delete`` - Delete an action plan entity * ``DELETE /v1/action_plans/`` * ``action_plan:update`` - Update an action plan entity * ``PATCH /v1/action_plans/`` * ``action:get_all``, ``action:detail`` - List available actions * ``GET /v1/actions`` * ``GET /v1/actions/detail`` * ``action:get`` - Retrieve a specific action entity * ``GET /v1/actions/`` * ``service:get_all``, ``service:detail`` - List available Watcher services * ``GET /v1/services`` * ``GET /v1/services/detail`` * ``service:get`` - Retrieve a specific Watcher service entity * ``GET /v1/services/`` To limit an action to a particular role or roles, you list the roles like so :: { "audit:create": ["role:admin", "role:superuser"] } The above would add a rule that allows only users holding either the "admin" or the "superuser" role to launch an audit. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/architecture.rst0000664000175000017500000004531700000000000022150 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _architecture: =================== System Architecture =================== This page presents the current technical architecture of the Watcher system. ..
_architecture_overview: Overview ======== Below you will find a diagram showing the main components of Watcher: .. image:: ./images/architecture.svg :width: 110% .. _components_definition: Components ========== .. _amqp_bus_definition: AMQP Bus -------- The AMQP message bus handles internal asynchronous communications between the different Watcher components. .. _cluster_datasource_definition: Datasource ---------- This component stores the metrics related to the cluster. It can potentially rely on any appropriate storage system (InfluxDB, OpenTSDB, MongoDB, ...) but will probably be more performant when using `Time Series Databases `_, which are optimized for handling time series data, i.e. arrays of numbers indexed by time (a datetime or a datetime range). .. _archi_watcher_api_definition: Watcher API ----------- This component implements the REST API provided by the Watcher system to the external world. It enables the :ref:`Administrator ` of a :ref:`Cluster ` to control and monitor the Watcher system via any interaction mechanism connected to this API: - :ref:`CLI ` - Horizon plugin - Python SDK You can also read the detailed description of `Watcher API`_. .. _archi_watcher_applier_definition: Watcher Applier --------------- This component is in charge of executing the :ref:`Action Plan ` built by the :ref:`Watcher Decision Engine `. Taskflow is the default workflow engine for Watcher. It connects to the :ref:`message bus ` and launches the :ref:`Action Plan ` whenever a triggering message is received on a dedicated AMQP queue. The triggering message contains the Action Plan UUID. It then gets the detailed information about the :ref:`Action Plan ` from the :ref:`Watcher Database `, which contains the list of :ref:`Actions ` to launch. It then loops on each :ref:`Action `, gets the associated class and calls the execute() method of this class. Most of the time, this method will first request a token from the Keystone API and, if it is allowed, send a request to the REST API of the OpenStack service which handles this kind of :ref:`atomic Action `. Note that as soon as the :ref:`Watcher Applier ` starts handling a given :ref:`Action ` from the list, a notification message is sent on the :ref:`message bus ` indicating that the state of the action has changed to **ONGOING**. If the :ref:`Action ` is successful, the :ref:`Watcher Applier ` sends a notification message on :ref:`the bus ` informing the other components of this. If the :ref:`Action ` fails, the :ref:`Watcher Applier ` tries to roll the :ref:`Managed resource ` back to its previous state (i.e. before the command was sent to the underlying OpenStack service). The Stein release added a new config option, 'action_execution_rule', which is a dict whose keys are strategy names and whose values are either 'ALWAYS' or 'ANY'. 'ALWAYS' means the callback function returns True as usual. 'ANY' means the return value depends on the result of the previous action execution: the callback returns True if the previous action failed, and the engine continues to run the next action; if the previous action succeeded, the callback returns False and the next action is skipped. For strategies that aren't listed in 'action_execution_rule', the callback always returns True. Please add the following section to the watcher.conf file if your strategy needs this feature. :: [watcher_workflow_engines.taskflow] action_execution_rule = {'your strategy name': 'ANY'} ..
_archi_watcher_cli_definition: Watcher CLI ----------- The watcher command-line interface (CLI) can be used to interact with the Watcher system in order to control it or to know its current status. Please read `the detailed documentation about Watcher CLI `_. .. _archi_watcher_dashboard_definition: Watcher Dashboard ----------------- The Watcher Dashboard can be used to interact with the Watcher system through Horizon in order to control it or to know its current status. Please read `the detailed documentation about Watcher Dashboard `_. .. _archi_watcher_database_definition: Watcher Database ---------------- This database stores all the Watcher domain objects which can be requested by the :ref:`Watcher API ` or the :ref:`Watcher CLI `: - :ref:`Goals ` - :ref:`Strategies ` - :ref:`Audit templates ` - :ref:`Audits ` - :ref:`Action plans ` - :ref:`Efficacy indicators ` via the Action Plan API. - :ref:`Actions ` The Watcher domain here is "*optimization of some resources provided by an OpenStack system*". .. _archi_watcher_decision_engine_definition: Watcher Decision Engine ----------------------- This component is responsible for computing a set of potential optimization :ref:`Actions ` in order to fulfill the :ref:`Goal ` of an :ref:`Audit `. It first reads the parameters of the :ref:`Audit ` to know the :ref:`Goal ` to achieve. Unless a strategy is explicitly specified, it then selects the most appropriate :ref:`strategy ` from the list of available strategies achieving this goal. The :ref:`Strategy ` is then dynamically loaded (via `stevedore `_). The :ref:`Watcher Decision Engine ` executes the strategy. In order to compute the potential :ref:`Solution ` for the Audit, the :ref:`Strategy ` relies on different sets of data: - :ref:`Cluster data models ` that are periodically synchronized through pluggable cluster data model collectors. These models contain the current state of various :ref:`Managed resources ` (e.g., the data stored in the Nova database). These models give a strategy the ability to reason about the current state of a given :ref:`cluster `. - The data stored in the :ref:`Cluster Datasource `, which provides information about the past of the :ref:`Cluster `. Here below is a sequence diagram showing how the Decision Engine builds and maintains the :ref:`cluster data models ` that are used by the strategies. ..
image:: ./images/sequence_create_audit_template.png :width: 100% The `Watcher API`_ makes sure that both the specified goal (mandatory) and its associated strategy (optional) are registered inside the :ref:`Watcher Database ` before storing a new audit template in the :ref:`Watcher Database `. .. _sequence_diagrams_create_and_launch_audit: Create and launch a new Audit ----------------------------- The :ref:`Administrator ` can then launch a new :ref:`Audit ` by providing at least the unique UUID of the previously created :ref:`Audit template `: .. image:: ./images/sequence_create_and_launch_audit.png :width: 100% The :ref:`Administrator ` can also specify the type of Audit and an interval (in the case of the CONTINUOUS type); an illustrative CLI invocation is shown at the end of this section. There are three types of Audit: ONESHOT, CONTINUOUS and EVENT. A ONESHOT Audit is launched once and, if it succeeds, a new action plan is provided; a CONTINUOUS Audit creates action plans at the specified interval (in seconds or in cron format, e.g. ``*/5 * * * *``), and whenever a new action plan is created, all previous action plans are set to the CANCELLED state; an EVENT Audit is launched upon receiving a call to the webhooks API. A message is sent on the :ref:`AMQP bus ` which triggers the Audit in the :ref:`Watcher Decision Engine `: .. image:: ./images/sequence_trigger_audit_in_decision_engine.png :width: 100% The :ref:`Watcher Decision Engine ` reads the Audit parameters from the :ref:`Watcher Database `. It instantiates the appropriate :ref:`strategy ` (using entry points) given both the :ref:`goal ` and the strategy associated with the parent :ref:`audit template ` of the :ref:`audit `. If no strategy is associated with the audit template, the strategy is dynamically selected by the Decision Engine. The :ref:`Watcher Decision Engine ` also builds the :ref:`Cluster Data Model `. This data model is needed by the :ref:`Strategy ` to know the current state and topology of the audited :ref:`OpenStack cluster `. The :ref:`Watcher Decision Engine ` calls the **execute()** method of the instantiated :ref:`Strategy ` and provides the data model as an input parameter. This method computes a :ref:`Solution ` to achieve the goal and returns it to the :ref:`Decision Engine `. At this point, actions are not scheduled yet. The :ref:`Watcher Decision Engine ` dynamically loads the :ref:`Watcher Planner ` implementation which is configured in Watcher (via entry points) and calls the **schedule()** method of this class with the solution as an input parameter. This method finds an appropriate scheduling of :ref:`Actions `, taking into account some scheduling rules (such as priorities between actions). It generates a new :ref:`Action Plan ` with status **RECOMMENDED** and saves it into the :ref:`Watcher Database `. The saved action plan is now a scheduled flow of actions to which a global efficacy is associated alongside a number of :ref:`Efficacy Indicators `, as specified by the related :ref:`goal `. If every step executes successfully, the :ref:`Watcher Decision Engine ` updates the current status of the Audit to **SUCCEEDED** in the :ref:`Watcher Database ` and sends a notification on the bus to inform other components that the :ref:`Audit ` was successful. The internal workflow the Decision Engine follows to conduct an audit can be seen in the sequence diagram here below: .. image:: ./images/sequence_from_audit_execution_to_actionplan_creation.png :width: 100%
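As an illustration, the audit type and interval mentioned above can be supplied through the Watcher CLI. The invocation below is a hypothetical sketch (the audit template name is illustrative; check ``openstack optimize audit create --help`` for the exact flags supported by your client version)::

    # Create a CONTINUOUS audit from an existing audit template,
    # re-evaluating the strategy every five minutes (cron syntax)
    openstack optimize audit create -a my_audit_template \
        -t CONTINUOUS -i '*/5 * * * *'

..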
_sequence_diagrams_launch_action_plan: Launch Action Plan ------------------ The :ref:`Administrator ` can then launch the recommended :ref:`Action Plan `: .. image:: ./images/sequence_launch_action_plan.png :width: 100% A message is sent on the :ref:`AMQP bus ` which triggers the :ref:`Action Plan ` in the :ref:`Watcher Applier `: .. image:: ./images/sequence_launch_action_plan_in_applier.png :width: 100% The :ref:`Watcher Applier ` will get the description of the flow of :ref:`Actions ` from the :ref:`Watcher Database ` and for each :ref:`Action ` it will instantiate a corresponding :ref:`Action ` handler Python class. The :ref:`Watcher Applier ` will then call the following methods of the :ref:`Action ` handler: - **validate_parameters()**: this method will make sure that all the provided input parameters are valid: - If all parameters are valid, the Watcher Applier moves on to the next step. - If they are not, an error is raised and the action is not executed. A notification is sent on the bus informing other components of the failure. - **preconditions()**: this method will make sure that all conditions are met before executing the action (for example, it makes sure that an instance still exists before trying to migrate it). - **execute()**: this method is what triggers real commands on other OpenStack services (such as Nova, ...) in order to change the target resource state. If the action is successfully executed, a notification message is sent on the bus indicating that the new state of the action is **SUCCEEDED**. If every action of the action flow has been executed successfully, a notification is sent on the bus to indicate that the whole :ref:`Action Plan ` has **SUCCEEDED**. .. _state_machine_diagrams: State Machine diagrams ====================== .. _audit_state_machine: Audit State Machine ------------------- An :ref:`Audit ` has a life-cycle and its current state may be one of the following: - **PENDING** : a request for an :ref:`Audit ` has been submitted (either manually by the :ref:`Administrator ` or automatically via some event handling mechanism) and is in the queue for being processed by the :ref:`Watcher Decision Engine ` - **ONGOING** : the :ref:`Audit ` is currently being processed by the :ref:`Watcher Decision Engine ` - **SUCCEEDED** : the :ref:`Audit ` has been executed successfully and at least one solution was found - **FAILED** : an error occurred while executing the :ref:`Audit ` - **DELETED** : the :ref:`Audit ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Audit ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** state and was suspended by the :ref:`Administrator ` The following diagram shows the different possible states of an :ref:`Audit ` and what event makes the state change to a new value: .. image:: ./images/audit_state_machine.png :width: 100% ..
_action_plan_state_machine: Action Plan State Machine ------------------------- An :ref:`Action Plan ` has a life-cycle and its current state may be one of the following: - **RECOMMENDED** : the :ref:`Action Plan ` is waiting for a validation from the :ref:`Administrator ` - **PENDING** : a request for an :ref:`Action Plan ` has been submitted (due to an :ref:`Administrator ` executing an :ref:`Audit `) and is in the queue for being processed by the :ref:`Watcher Applier ` - **ONGOING** : the :ref:`Action Plan ` is currently being processed by the :ref:`Watcher Applier ` - **SUCCEEDED** : the :ref:`Action Plan ` has been executed successfully (i.e. all :ref:`Actions ` that it contains have been executed successfully) - **FAILED** : an error occurred while executing the :ref:`Action Plan ` - **DELETED** : the :ref:`Action Plan ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Action Plan ` was in **RECOMMENDED**, **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUPERSEDED** : the :ref:`Action Plan ` was in RECOMMENDED state and was automatically superseded by Watcher, due to an expiration delay or an update of the :ref:`Cluster data model ` The following diagram shows the different possible states of an :ref:`Action Plan ` and what event makes the state change to a new value: .. image:: ./images/action_plan_state_machine.png :width: 100% .. _Watcher API: https://docs.openstack.org/api-ref/resource-optimization/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/conf.py0000664000175000017500000001123500000000000020223 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from watcher import objects objects.register_all() # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'oslo_config.sphinxext', 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain', 'sphinxcontrib.pecanwsme.rest', 'stevedore.sphinxext', 'ext.term', 'ext.versioned_notifications', 'oslo_config.sphinxconfiggen', 'openstackdocstheme', 'sphinx.ext.napoleon', 'sphinxcontrib.rsvgconverter', ] wsme_protocols = ['restjson'] config_generator_config_file = [( '../../etc/watcher/oslo-config-generator/watcher.conf', '_static/watcher')] sample_config_basename = 'watcher' # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. 
master_doc = 'index' # General information about the project. project = 'Watcher' copyright = 'OpenStack Foundation' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['watcher.'] exclude_patterns = [ # The man directory includes some snippet files that are included # in other documents during the build but that should not be # included in the toctree themselves, so tell Sphinx to ignore # them when scanning for input files. 'man/footer.rst', 'man/general-options.rst', 'strategies/strategy-template.rst', 'image_src/plantuml/README.rst', ] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True suppress_warnings = ['app.add_directive'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/watcher-api', 'watcher-api', 'Watcher API Server', ['OpenStack'], 1), ('man/watcher-applier', 'watcher-applier', 'Watcher Applier', ['OpenStack'], 1), ('man/watcher-db-manage', 'watcher-db-manage', 'Watcher Db Management Utility', ['OpenStack'], 1), ('man/watcher-decision-engine', 'watcher-decision-engine', 'Watcher Decision Engine', ['OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_theme = 'openstackdocs' # html_static_path = ['static'] # html_theme_options = {} # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # openstackdocstheme options openstackdocs_repo_name = 'openstack/watcher' openstackdocs_pdf_link = True openstackdocs_auto_name = False openstackdocs_bug_project = 'watcher' openstackdocs_bug_tag = '' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-watcher.tex', 'Watcher Documentation', 'OpenStack Foundation', 'manual'), ] # If false, no module index is generated. latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False # Example configuration for intersphinx: refer to the Python standard library. # intersphinx_mapping = {'http://docs.python.org/': None} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/source/configuration/0000775000175000017500000000000000000000000021571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/configuration/configuring.rst0000664000175000017500000004110200000000000024633 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. 
You can view the license at: https://creativecommons.org/licenses/by/3.0/ =================== Configuring Watcher =================== This document is continually updated and reflects the latest available code of the Watcher service. Service overview ================ The Watcher system is a collection of services that provides support to optimize your IaaS platform. The Watcher service may, depending upon configuration, interact with several other OpenStack services. This includes: - the OpenStack Identity service (`keystone`_) for request authentication and to locate other OpenStack services. - the OpenStack Telemetry service (`ceilometer`_) for collecting resource metrics. - the time series database (`gnocchi`_) for consuming resource metrics. - the OpenStack Compute service (`nova`_) works with the Watcher service and acts as a user-facing API for instance migration. - the OpenStack Bare Metal service (`ironic`_) works with the Watcher service and allows managing the power state of nodes. - the OpenStack Block Storage service (`cinder`_) works with the Watcher service and acts as an API for volume migration. The Watcher service includes the following components: - ``watcher-decision-engine``: runs audits on part of your IaaS and returns an action plan in order to optimize resource placement. - ``watcher-api``: A RESTful API that processes application requests by sending them to the watcher-decision-engine over RPC. - ``watcher-applier``: applies the action plan. - `python-watcherclient`_: A command-line interface (CLI) for interacting with the Watcher service. - `watcher-dashboard`_: A Horizon plugin for interacting with the Watcher service. Additionally, the Watcher service has certain external dependencies, which are very similar to other OpenStack services: - A database to store audit and action plan information and state. You can set the database back-end type and location. - A queue. A central hub for passing messages, such as `RabbitMQ`_. Optionally, one may wish to utilize the following associated projects for additional functionality: - `watcher metering`_: an alternative way to collect and push metrics to the Telemetry service. .. _`keystone`: https://github.com/openstack/keystone .. _`ceilometer`: https://github.com/openstack/ceilometer .. _`nova`: https://github.com/openstack/nova .. _`gnocchi`: https://github.com/gnocchixyz/gnocchi .. _`ironic`: https://github.com/openstack/ironic .. _`cinder`: https://github.com/openstack/cinder .. _`python-watcherclient`: https://github.com/openstack/python-watcherclient .. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard .. _`watcher metering`: https://github.com/b-com/watcher-metering .. _`RabbitMQ`: https://www.rabbitmq.com/ Install and configure prerequisites =================================== You can configure Watcher services to run on separate nodes or the same node. In this guide, the components run on one node, typically the Controller node. This section shows you how to install and configure the services. It assumes that the Identity, Image, Compute, and Networking services have already been set up. .. _identity-service_configuration: Configure the Identity service for the Watcher service ------------------------------------------------------ #. Create the Watcher service user (e.g. ``watcher``). The service uses this to authenticate with the Identity Service. Use the ``KEYSTONE_SERVICE_PROJECT_NAME`` project (named ``service`` by default in devstack) and give the user the ``admin`` role: ..
code-block:: bash $ keystone user-create --name=watcher --pass=WATCHER_PASSWORD \ --email=watcher@example.com \ --tenant=KEYSTONE_SERVICE_PROJECT_NAME $ keystone user-role-add --user=watcher \ --tenant=KEYSTONE_SERVICE_PROJECT_NAME --role=admin or (by using python-openstackclient 1.8.0+) .. code-block:: bash $ openstack user create --password WATCHER_PASSWORD --enable \ --email watcher@example.com watcher \ --project=KEYSTONE_SERVICE_PROJECT_NAME $ openstack role add --project KEYSTONE_SERVICE_PROJECT_NAME \ --user watcher admin #. You must register the Watcher Service with the Identity Service so that other OpenStack services can locate it. To register the service: .. code-block:: bash $ keystone service-create --name=watcher --type=infra-optim \ --description="Infrastructure Optimization service" or (by using python-openstackclient 1.8.0+) .. code-block:: bash $ openstack service create --name watcher infra-optim \ --description="Infrastructure Optimization service" #. Create the endpoints by replacing YOUR_REGION and ``WATCHER_API_[PUBLIC|ADMIN|INTERNAL]_IP`` with your region and your Watcher Service's API node IP addresses (or FQDN): .. code-block:: bash $ keystone endpoint-create \ --service-id=the_service_id_above \ --publicurl=http://WATCHER_API_PUBLIC_IP:9322 \ --internalurl=http://WATCHER_API_INTERNAL_IP:9322 \ --adminurl=http://WATCHER_API_ADMIN_IP:9322 or (by using python-openstackclient 1.8.0+) .. code-block:: bash $ openstack endpoint create --region YOUR_REGION watcher public http://WATCHER_API_PUBLIC_IP:9322 $ openstack endpoint create --region YOUR_REGION watcher internal http://WATCHER_API_INTERNAL_IP:9322 $ openstack endpoint create --region YOUR_REGION watcher admin http://WATCHER_API_ADMIN_IP:9322 .. _watcher-db_configuration: Set up the database for Watcher ------------------------------- The Watcher service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. #. In MySQL, create a ``watcher`` database that is accessible by the ``watcher`` user. Replace WATCHER_DBPASSWORD with the actual password:: # mysql mysql> CREATE DATABASE watcher CHARACTER SET utf8; mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ IDENTIFIED BY 'WATCHER_DBPASSWORD'; mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ IDENTIFIED BY 'WATCHER_DBPASSWORD'; Configure the Watcher service ============================= The Watcher service is configured via its configuration file. This file is typically located at ``/etc/watcher/watcher.conf``. 
You can easily generate and update a sample configuration file named :ref:`watcher.conf.sample ` by using the following commands:: $ git clone https://opendev.org/openstack/watcher.git $ cd watcher/ $ tox -e genconfig $ vi etc/watcher/watcher.conf.sample The configuration file is organized into the following sections: * ``[DEFAULT]`` - General configuration * ``[api]`` - API server configuration * ``[database]`` - SQL driver configuration * ``[keystone_authtoken]`` - Keystone Authentication plugin configuration * ``[watcher_clients_auth]`` - Keystone auth configuration for clients * ``[watcher_applier]`` - Watcher Applier module configuration * ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration * ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration * ``[cinder_client]`` - Cinder client configuration * ``[glance_client]`` - Glance client configuration * ``[gnocchi_client]`` - Gnocchi client configuration * ``[ironic_client]`` - Ironic client configuration * ``[keystone_client]`` - Keystone client configuration * ``[nova_client]`` - Nova client configuration * ``[neutron_client]`` - Neutron client configuration * ``[placement_client]`` - Placement client configuration The Watcher configuration file is expected to be named ``watcher.conf``. When starting Watcher, you can specify a different configuration file to use with ``--config-file``. If you do **not** specify a configuration file, Watcher will look in the following directories for a configuration file, in order: * ``~/.watcher/`` * ``~/`` * ``/etc/watcher/`` * ``/etc/`` Although some configuration options are mentioned here, it is recommended that you review all the :ref:`available options ` so that the Watcher service is configured for your needs. #. The Watcher Service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. Configure the location of the database via the ``connection`` option. In the following, replace WATCHER_DBPASSWORD with the password of your ``watcher`` user, and replace DB_IP with the IP address where the DB server is located:: [database] ... # The SQLAlchemy connection string used to connect to the # database (string value) #connection= connection = mysql+pymysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8 #. Configure the Watcher Service to use the RabbitMQ message broker by setting one or more of these options. Replace RABBIT_HOST with the IP address of the RabbitMQ server, and RABBITMQ_USER and RABBITMQ_PASSWORD with the RabbitMQ server login credentials :: [DEFAULT] # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the transport_url # option. (string value) control_exchange = watcher # ... transport_url = rabbit://RABBITMQ_USER:RABBITMQ_PASSWORD@RABBIT_HOST #. The Watcher API must validate the token provided with every incoming request, via keystonemiddleware, which requires the Watcher service to be configured with the right credentials for the Identity service. In the configuration section below: * replace IDENTITY_IP with the IP of the Identity server * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` user * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of the project created for OpenStack services (e.g.
``service``) :: [keystone_authtoken] # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = auth_type = password # Authentication URL (unknown value) #auth_url = auth_url = http://IDENTITY_IP:5000 # Username (unknown value) # Deprecated group/name - [DEFAULT]/username #username = username=watcher # User's password (unknown value) #password = password = WATCHER_PASSWORD # Domain ID containing project (unknown value) #project_domain_id = project_domain_id = default # User's domain id (unknown value) #user_domain_id = user_domain_id = default # Project name to scope to (unknown value) # Deprecated group/name - [DEFAULT]/tenant-name #project_name = project_name = KEYSTONE_SERVICE_PROJECT_NAME #. Watcher's decision engine and applier interact with other OpenStack projects through those projects' clients. In order to instantiate these clients, Watcher needs to request a new session from the Identity service using the right credentials. In the configuration section here below: * replace IDENTITY_IP with the IP of the Identity server * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` user * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created for OpenStack services (e.g. ``service``) :: [watcher_clients_auth] # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = auth_type = password # Authentication URL (unknown value) #auth_url = auth_url = http://IDENTITY_IP:5000 # Username (unknown value) # Deprecated group/name - [DEFAULT]/username #username = username=watcher # User's password (unknown value) #password = password = WATCHER_PASSWORD # Domain ID containing project (unknown value) #project_domain_id = project_domain_id = default # User's domain id (unknown value) #user_domain_id = user_domain_id = default # Project name to scope to (unknown value) # Deprecated group/name - [DEFAULT]/tenant-name #project_name = project_name = KEYSTONE_SERVICE_PROJECT_NAME #. Configure the clients to use a specific version if desired. For example, to configure Watcher to use a Nova client with version 2.1, use:: [nova_client] # Version of Nova API to use in novaclient. (string value) #api_version = 2.56 api_version = 2.1 #. Create the Watcher Service database tables:: $ watcher-db-manage --config-file /etc/watcher/watcher.conf create_schema #. Start the Watcher Service:: $ watcher-api && watcher-decision-engine && watcher-applier Configure Nova compute ====================== Please check your hypervisor configuration to correctly handle `instance migration`_. .. _`instance migration`: https://docs.openstack.org/nova/latest/admin/migration.html Configure Measurements ====================== You can configure and install Ceilometer by following the documentation below : #. https://docs.openstack.org/ceilometer/latest The built-in strategy 'basic_consolidation' provided by watcher requires "**compute.node.cpu.percent**" and "**cpu**" measurements to be collected by Ceilometer. The measurements available depend on the hypervisors that OpenStack manages on the specific implementation. You can find the measurements available per hypervisor and OpenStack release on the OpenStack site. You can use 'ceilometer meter-list' to list the available meters. For more information: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html Ceilometer is designed to collect measurements from OpenStack services and from other external components. 
If you would like to add new meters to the currently existing ones, you need to follow the documentation below: #. https://docs.openstack.org/ceilometer/latest/contributor/measurements.html#new-measurements The Ceilometer collector uses a pluggable storage system, meaning that you can pick any database system you prefer. The original implementation has been based on MongoDB but you can create your own storage driver using whatever technology you want. For more information: https://wiki.openstack.org/wiki/Gnocchi Configure Nova Notifications ============================ Watcher can consume notifications generated by the Nova services, in order to build or update, in real time, its cluster data model related to computing resources. Nova emits unversioned (legacy) and versioned notifications on different topics. Because legacy notifications will be deprecated, Watcher consumes Nova versioned notifications. * In the file ``/etc/nova/nova.conf``, the value of driver in the section ``[oslo_messaging_notifications]`` must not be ``noop``, and the value of notification_format in the section ``[notifications]`` should be ``both`` or ``versioned`` :: [oslo_messaging_notifications] driver = messagingv2 ... [notifications] notification_format = both Configure Cinder Notifications ============================== Watcher can also consume notifications generated by the Cinder services, in order to build or update, in real time, its cluster data model related to storage resources. To do so, you have to update the Cinder configuration file on controller and volume nodes, in order to let Watcher receive Cinder notifications in a dedicated ``watcher_notifications`` channel. * In the file ``/etc/cinder/cinder.conf``, update the section ``[oslo_messaging_notifications]``, by redefining the list of topics into which Cinder services will publish events :: [oslo_messaging_notifications] driver = messagingv2 topics = notifications,watcher_notifications * Restart the Cinder services. Workers ======= You can define a number of workers for the Decision Engine and the Applier. If you want to create and run more audits simultaneously, you have to raise the number of workers used by the Decision Engine:: [watcher_decision_engine] ... # The maximum number of threads that can be used to execute strategies # (integer value) #max_workers = 2 If you want to execute more recommended action plans simultaneously, you have to raise the number of workers used by the Applier:: [watcher_applier] ... # Number of workers for applier, default value is 1. (integer value) # Minimum value: 1 #workers = 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/configuration/index.rst0000664000175000017500000000016500000000000023434 0ustar00zuulzuul00000000000000=================== Configuration Guide =================== .. toctree:: :maxdepth: 2 configuring watcher ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/configuration/watcher.rst0000664000175000017500000000037600000000000023766 0ustar00zuulzuul00000000000000.. _watcher_sample_configuration_files: ------------ watcher.conf ------------ The ``watcher.conf`` file contains most of the options to configure the Watcher services. ..
show-options:: :config-file: etc/watcher/oslo-config-generator/watcher.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/source/contributor/0000775000175000017500000000000000000000000021274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/api_microversion_history.rst0000664000175000017500000000011300000000000027152 0ustar00zuulzuul00000000000000.. include:: ../../../watcher/api/controllers/rest_api_version_history.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/concurrency.rst0000664000175000017500000002632600000000000024367 0ustar00zuulzuul00000000000000=========== Concurrency =========== Introduction ************ Modern processors typically contain multiple cores all capable of executing instructions in parallel. Ensuring applications can fully utilize modern underlying hardware requires developing with these concepts in mind. OpenStack maintains a number of libraries to facilitate this utilization; combined with constructs like CPython's GIL_, they make the proper use of these concepts more straightforward than in many other programming languages. The primary libraries maintained by OpenStack to facilitate concurrency are futurist_ and taskflow_. futurist is the more straightforward and lightweight of the two, while taskflow is more advanced, supporting features like rollback mechanisms. Within Watcher both libraries are used to facilitate concurrency. .. _GIL: https://wiki.python.org/moin/GlobalInterpreterLock .. _futurist: https://docs.openstack.org/futurist/latest/ .. _taskflow: https://docs.openstack.org/taskflow/latest/ Threadpool ********** A threadpool is a collection of one or more threads, typically called *workers*, to which tasks can be submitted. These submitted tasks will be scheduled by a threadpool and subsequently executed. In the case of Python, tasks typically are bound or unbound methods, while other programming languages like Java require implementing an interface. The order and amount of concurrency with which these tasks are executed is up to the threadpool to decide. Some libraries like taskflow allow for either strong or loose ordering of tasks while others like futurist might only support loose ordering. Taskflow supports building tree-based hierarchies of dependent tasks for example. Upon submission of a task to a threadpool a so-called future_ is returned. These objects allow determining information about the task, such as whether it is currently being executed or has finished execution. When the task has finished execution, the future can also be used to retrieve what was returned by the method. Some libraries like futurist provide synchronization primitives for collections of futures such as wait_for_any_. The following sections will cover different types of concurrency used in various services of Watcher. .. _future: https://docs.python.org/3/library/concurrent.futures.html .. _wait_for_any: https://docs.openstack.org/futurist/latest/reference/index.html#waiters Decision engine concurrency *************************** The concurrency in the decision engine is governed by two independent threadpools. Both of these threadpools are GreenThreadPoolExecutor_ from the futurist_ library; a minimal standalone sketch of the future-based pattern they implement is shown below.
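The following self-contained sketch is not Watcher code: the task function and worker count are arbitrary examples used only to illustrate submitting tasks to a futurist ``GreenThreadPoolExecutor`` and synchronizing on the returned futures with the ``waiters`` primitives mentioned above.

.. code-block:: python

    import futurist
    from futurist import waiters


    def fetch(name):
        # Placeholder for a network or I/O bound operation.
        return "result for %s" % name

    # Create a pool of green threads, as the decision engine does.
    executor = futurist.GreenThreadPoolExecutor(max_workers=4)

    # Submitting a task returns a future immediately; the task runs
    # in the background on one of the workers.
    futures = [executor.submit(fetch, name) for name in ("nova", "cinder")]

    # Block until every submitted task has completed. wait_for_all
    # returns a named tuple of (done, not_done) futures.
    done, not_done = waiters.wait_for_all(futures)

    # Once a future is done, its return value can be retrieved.
    for future in done:
        print(future.result())

    executor.shutdown()

``waiters.wait_for_any`` has the same shape but returns as soon as at least one future has finished, which is the behavior the ``do_while_futures`` helper discussed below builds on.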
One of these threadpools is used automatically and most contributors will not interact with it while developing new features. The other threadpool can frequently be used while developing new features or updating existing ones. It is known as the DecisionEngineThreadpool and allows achieving performance improvements in network- or I/O-bound operations. .. _GreenThreadPoolExecutor: https://docs.openstack.org/futurist/latest/reference/index.html#executors AuditEndpoint ############# The first threadpool is used to allow multiple audits to be run in parallel. In practice, however, only one audit can effectively run at a time. This is due to the data model used by audits being a singleton. To prevent audits destroying each other's data model, one audit must wait for the other to complete before being allowed to access this data model. A performance improvement could be achieved by being more intelligent in the use, caching and construction of these data models. DecisionEngineThreadPool ######################## The second threadpool is used for generic tasks; typically networking and I/O operations benefit the most from this threadpool. Upon execution of an audit, this threadpool can be utilized to retrieve information from the Nova compute service, for instance. This second threadpool is a singleton and is shared amongst concurrently running audits; as a result, the number of workers is static and independent of the number of workers in the first threadpool. The use of the :class:`~.DecisionEngineThreadpool` while building the Nova compute data model demonstrates how it can effectively be used. In the following example a reference to the :class:`~.DecisionEngineThreadpool` is stored in ``self.executor``. Here two tasks are submitted: one with the function ``self._collect_aggregates`` and the other with the function ``self._collect_zones``. In both ``self.executor.submit`` calls, all subsequent arguments are passed to the function being submitted as a task, following the common ``(fn, *args, **kwargs)`` signature. One of the original signatures would be ``def _collect_aggregates(host_aggregates, compute_nodes)`` for example. .. code-block:: python zone_aggregate_futures = { self.executor.submit( self._collect_aggregates, host_aggregates, compute_nodes), self.executor.submit( self._collect_zones, availability_zones, compute_nodes) } waiters.wait_for_all(zone_aggregate_futures) The last statement of the example above waits on all futures to complete. Similarly, ``waiters.wait_for_any`` will wait for any future of the specified collection to complete. To simplify the usage of ``wait_for_any``, the :class:`~.DecisionEngineThreadpool` defines a ``do_while_futures`` method. This method will iterate in a do-while loop over a collection of futures until all of them have completed. The advantage of ``do_while_futures`` is that it allows a method to be called immediately as soon as a future finishes. The arguments for this callback method can be supplied when calling ``do_while_futures``; however, the first argument to the callback is always the future itself! If the collection of futures can safely be modified, ``do_while_futures_modify`` can be used and should have slightly better performance. The following example will show how ``do_while_futures`` is used in the decision engine. .. code-block:: python # For every compute node from compute_nodes submit a task to gather the node's information. # List comprehension is used to store all the futures of the submitted tasks in node_futures.
node_futures = [self.executor.submit( self.nova_helper.get_compute_node_by_name, node, servers=True, detailed=True) for node in compute_nodes] LOG.debug("submitted {0} jobs".format(len(compute_nodes))) future_instances = [] # do_while iterates over node_futures and upon completion of a future calls # self._compute_node_future with the future and future_instances as arguments. self.executor.do_while_futures_modify( node_futures, self._compute_node_future, future_instances) # Wait for all instance jobs to finish waiters.wait_for_all(future_instances) Finally, let's demonstrate how powerful ``do_while_futures`` can be by showing what the ``compute_node_future`` callback does. First, it retrieves the result from the future and adds the compute node to the data model. Afterwards, it checks if the compute node has any associated instances and, if so, it submits an additional task to the :class:`~.DecisionEngineThreadpool`. The future is appended to the ``future_instances`` list so that ``waiters.wait_for_all`` can be called on this list. This is important as otherwise the building of the data model might return before all tasks for instances have finished. .. code-block:: python # Get the result from the future. node_info = future.result()[0] # Filter out baremetal nodes. if node_info.hypervisor_type == 'ironic': LOG.debug("filtering out baremetal node: %s", node_info) return # Add the compute node to the data model. self.add_compute_node(node_info) # Get the instances from the compute node. instances = getattr(node_info, "servers", None) # Do not submit job if there are no instances on compute node. if instances is None: LOG.info("No instances on compute_node: {0}".format(node_info)) return # Submit a job to retrieve detailed information about the instances. future_instances.append( self.executor.submit( self.add_instance_node, node_info, instances) ) Without ``do_while_futures`` an additional ``waiters.wait_for_all`` would be required in between the compute node tasks and the instance tasks. This would cause the progress of the decision engine to stall as fewer and fewer tasks remain active before the instance tasks could be submitted. This demonstrates how ``do_while_futures`` can be used to achieve more constant utilization of the underlying hardware. Applier concurrency ******************* The applier does not use the futurist_ GreenThreadPoolExecutor_ directly but instead uses taskflow_. However, taskflow still utilizes a green threadpool. This threadpool is initialized in the workflow engine called :class:`~.DefaultWorkFlowEngine`. Currently Watcher supports one workflow engine, but the base class allows contributors to develop other workflow engines as well. In taskflow, tasks are created using different types of flows, such as linear, unordered or graph flows. The linear and graph flows allow for strong ordering between individual tasks, and it is for this reason that the workflow engine utilizes a graph flow. The creation of tasks, subsequently linking them into a graph-like structure, and submitting them is shown below. ..
code-block:: python self.execution_rule = self.get_execution_rule(actions) flow = gf.Flow("watcher_flow") actions_uuid = {} for a in actions: task = TaskFlowActionContainer(a, self) flow.add(task) actions_uuid[a.uuid] = task for a in actions: for parent_id in a.parents: flow.link(actions_uuid[parent_id], actions_uuid[a.uuid], decider=self.decider) e = engines.load( flow, executor='greenthreaded', engine='parallel', max_workers=self.config.max_workers) e.run() return flow In the applier, tasks are contained in a :class:`~.TaskFlowActionContainer` which allows them to trigger events in the workflow engine. This way the workflow engine can halt or take other actions while the action plan is being executed, based on the success or failure of individual actions. However, the base workflow engine simply uses these notifications to store the result of individual actions in the database. Additionally, since taskflow uses a graph flow, if any of the tasks fails, none of its children will be executed, while ``do_revert`` will be triggered for all of its parents. .. code-block:: python class TaskFlowActionContainer(...): ... def do_execute(self, *args, **kwargs): ... result = self.action.execute() if result is True: return self.engine.notify(self._db_action, objects.action.State.SUCCEEDED) else: self.engine.notify(self._db_action, objects.action.State.FAILED) class BaseWorkFlowEngine(...): ... def notify(self, action, state): db_action = objects.Action.get_by_uuid(self.context, action.uuid, eager=True) db_action.state = state db_action.save() return db_action ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/contributing.rst0000664000175000017500000001116000000000000024534 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below we cover the more project-specific information you need to get started with Watcher. Communication ~~~~~~~~~~~~~~ .. This would be a good place to put the channel you chat in as a project; when/ where your meeting is, the tags you prepend to your ML threads, etc. IRC Channel ``#openstack-watcher`` (changelog_) Mailing list (prefix subjects with ``[watcher]``) http://lists.openstack.org/pipermail/openstack-discuss/ Weekly Meetings Bi-weekly, on Wednesdays at 08:00 UTC on odd weeks in the ``#openstack-meeting-alt`` IRC channel (`meetings logs`_) Meeting Agenda https://wiki.openstack.org/wiki/Watcher_Meeting_Agenda .. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/ .. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/ Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~~ .. This section should list the core team, their irc nicks, emails, timezones etc. If all this info is maintained elsewhere (i.e. a wiki), you can link to that instead of enumerating everyone here.
+--------------------+---------------+------------------------------------+ | Name | IRC | Email | +====================+===============+====================================+ | `Li Canwei`_ | licanwei | li.canwei2@zte.com.cn | +--------------------+---------------+------------------------------------+ | `chen ke`_ | chenke | chen.ke14@zte.com.cn | +--------------------+---------------+------------------------------------+ | `Corne Lukken`_ | dantalion | info@dantalion.nl | +--------------------+---------------+------------------------------------+ | `su zhengwei`_ | suzhengwei | sugar-2008@163.com | +--------------------+---------------+------------------------------------+ | `Yumeng Bao`_ | Yumeng | yumeng_bao@yahoo.com | +--------------------+---------------+------------------------------------+ .. _Corne Lukken: https://launchpad.net/~dantalion .. _Li Canwei: https://launchpad.net/~li-canwei2 .. _su zhengwei: https://launchpad.net/~sue.sam .. _Yumeng Bao: https://launchpad.net/~yumeng-bao .. _chen ke: https://launchpad.net/~chenker New Feature Planning ~~~~~~~~~~~~~~~~~~~~ .. This section is for talking about the process to get a new feature in. Some projects use blueprints, some want specs, some want both! Some projects stick to a strict schedule when selecting what new features will be reviewed for a release. New features will be discussed via IRC or ML (with [Watcher] prefix). The Watcher team uses blueprints in `Launchpad`_ to manage new features. .. _Launchpad: https://launchpad.net/watcher Task Tracking ~~~~~~~~~~~~~~ .. This section is about where you track tasks- launchpad? storyboard? is there more than one launchpad project? what's the name of the project group in storyboard? We track our tasks in Launchpad. If you're looking for a smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag. .. NOTE: If your tag is not 'low-hanging-fruit' please change the text above. Reporting a Bug ~~~~~~~~~~~~~~~ .. Pretty self explanatory section, link directly to where people should report bugs for your project. You found an issue and want to make sure we are aware of it? You can do so `HERE`_. .. _HERE: https://bugs.launchpad.net/watcher Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ .. This section should have info about what it takes to get something merged. Do you require one or two +2's before +W? Do some of your repos require unit test changes with all patches? etc. Due to the small number of core reviewers of the Watcher project, we only need one +2 before +W (merge). All patches except documentation or typo fixes must have unit tests. Project Team Lead Duties ------------------------ .. this section is where you can put PTL specific duties not already listed in the common PTL guide (linked below) or if you already have them written up elsewhere, you can link to that doc here. All common PTL duties are enumerated here in the `PTL guide `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/devstack.rst0000664000175000017500000002656200000000000023641 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License.
You can view the license at: https://creativecommons.org/licenses/by/3.0/ ============================================= Set up a development environment via DevStack ============================================= Watcher is currently able to optimize compute resources - specifically Nova compute hosts - via operations such as live migrations. In order for you to fully be able to exercise what Watcher can do, it is necessary to have a multinode environment to use. You can set up the Watcher services quickly and easily using a Watcher DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin model. To enable the Watcher plugin with DevStack, add the following to the ``[[local|localrc]]`` section of your controller's ``local.conf``:: enable_plugin watcher https://opendev.org/openstack/watcher For more detailed instructions, see `Detailed DevStack Instructions`_. Check out the `DevStack documentation`_ for more information regarding DevStack. .. _PluginModelDocs: https://docs.openstack.org/devstack/latest/plugins.html .. _DevStack documentation: https://docs.openstack.org/devstack/latest Quick Devstack Instructions with Datasources ============================================ Watcher requires a datasource to collect metrics from compute nodes and instances in order to execute most strategies. To enable this, a ``[[local|localrc]]`` that sets up DevStack for some of the supported datasources is provided. These examples specify the minimal configuration parameters to get both Watcher and the datasource working but can be expanded as desired. Gnocchi ------- With the Gnocchi datasource most of the metrics for compute nodes and instances will work with the provided configuration, but metrics that require Ironic, such as ``host_airflow`` and ``host_power``, will still be unavailable, as will ``instance_l3_cpu_cache``. .. code-block:: ini [[local|localrc]] enable_plugin watcher https://opendev.org/openstack/watcher enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git enable_plugin aodh https://opendev.org/openstack/aodh enable_plugin panko https://opendev.org/openstack/panko CEILOMETER_BACKEND=gnocchi [[post-config|$NOVA_CONF]] [DEFAULT] compute_monitors=cpu.virt_driver Detailed DevStack Instructions ============================== #. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack). One of these servers will be the controller node while the others will be compute nodes. N is preferably >= 3 so that you have at least 2 compute nodes, but in order to stand up the Watcher services only 1 server is needed (i.e., no computes are needed if you want to just experiment with the Watcher services). These servers can be VMs running on your local machine via VirtualBox if you prefer. DevStack currently recommends that you use Ubuntu 16.04 LTS. The servers should also have connections to the same network such that they are all able to communicate with one another. #. For each server, clone the DevStack repository and create the stack user .. code-block:: bash sudo apt-get update sudo apt-get install git git clone https://opendev.org/openstack/devstack.git sudo ./devstack/tools/create-stack-user.sh Now you have a stack user that is used to run the DevStack processes. You may want to give your stack user a password to allow SSH via a password .. code-block:: bash sudo passwd stack #. Switch to the stack user and clone the DevStack repo again ..
code-block:: bash sudo su stack cd ~ git clone https://opendev.org/openstack/devstack.git #. For each compute node, copy the provided `local.conf.compute`_ example file to the compute node's system at ~/devstack/local.conf. Make sure the HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP is set to the IP address of the compute node and SERVICE_HOST is set to the IP address of the controller node. If you need specific metrics collected (or want to use something other than Ceilometer), be sure to configure it. For example, in the `local.conf.compute`_ example file, the appropriate ceilometer plugins and services are enabled and disabled. If you were using something other than Ceilometer, then you would likely want to configure it likewise. The example file also sets the compute monitors nova configuration option to use the CPU virt driver. If you needed other metrics, it may be necessary to configure similar configuration options for the projects providing those metrics. #. For the controller node, copy the provided `local.conf.controller`_ example file to the controller node's system at ~/devstack/local.conf. Make sure the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP address of the controller node. .. NOTE:: if you want to use another Watcher git repository (such as a local one), then change the enable plugin line .. code-block:: bash enable_plugin watcher [optional_branch] If you do this, then the Watcher DevStack plugin will try to pull the python-watcherclient repo from ``/../``, so either make sure that is also available or specify WATCHERCLIENT_REPO in the ``local.conf`` file. .. NOTE:: if you want to use a specific branch, specify WATCHER_BRANCH in the local.conf file. By default it will use the master branch. .. Note:: watcher-api will run under apache/httpd by default; set the variable WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd. For development environments it is suggested to set WATCHER_USE_MOD_WSGI to FALSE. For production environments it is suggested to keep it at the default TRUE value. #. Start stacking from the controller node:: ./devstack/stack.sh #. Start stacking on each of the compute nodes using the same command. .. seealso:: Configure the environment for live migration via NFS. See the `Multi-Node DevStack Environment`_ section for more details. .. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller .. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute Multi-Node DevStack Environment =============================== Since deploying Watcher with only a single compute node is not very useful, a few tips are given here for enabling a multi-node environment with live migration. .. NOTE:: Nova supports live migration with local block storage, so NFS is not required by default and is considered an advanced configuration. The minimum requirements for live migration are: - all hostnames are resolvable on each host - all hosts have a passwordless ssh key that is trusted by the other hosts - all hosts have a known_hosts file that lists each host If these requirements are met, live migration will be possible. Shared storage such as Ceph, booting from a Cinder volume, or NFS is recommended when testing evacuate if you want to preserve VM data.
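A quick way to sanity check the requirements above before attempting a live migration is a small helper script along the following lines. This is only a sketch: the hostnames ``compute1`` and ``compute2`` are hypothetical placeholders for your own nodes, and the ``stack`` user is assumed from the DevStack setup described earlier.

.. code-block:: python

    import socket
    import subprocess

    # Hypothetical node names; replace with your own compute hostnames.
    NODES = ["compute1", "compute2"]

    for node in NODES:
        # Every hostname must be resolvable on each host.
        try:
            socket.gethostbyname(node)
        except socket.gaierror:
            print("%s is not resolvable" % node)
            continue

        # Passwordless SSH must already be trusted: BatchMode forbids
        # password prompts, so this fails if keys are not exchanged.
        result = subprocess.run(
            ["ssh", "-o", "BatchMode=yes", "stack@%s" % node, "true"])
        if result.returncode != 0:
            print("passwordless SSH to %s is not working" % node)

Running this from every node against every other node covers both directions of the key exchange described in the next section.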
Setting up SSH keys between compute nodes to enable live migration ------------------------------------------------------------------ In order for live migration to work, SSH keys need to be exchanged between each compute node: 1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub) needs to be in the DESTINATION stack user's authorized_keys file (~stack/.ssh/authorized_keys). This can be accomplished by manually copying the contents from the file on the SOURCE to the DESTINATION. If you have a password configured for the stack user, then you can use the following command to accomplish the same thing:: ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION 2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub) needs to be in the SOURCE root user's known_hosts file (/root/.ssh/known_hosts). This can be accomplished by running the following on the SOURCE machine (hostname must be used):: ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and every compute node's public ECDSA key needs to be in every other compute node's root user's known_hosts file. Configuring NFS Server (ADVANCED) --------------------------------- If you would like to use shared storage for live migration, the controller can serve as the NFS server if needed .. code-block:: bash sudo apt-get install nfs-kernel-server sudo mkdir -p /nfs/instances sudo chown stack:stack /nfs/instances Add an entry to ``/etc/exports`` with the appropriate gateway and netmask information .. code-block:: bash /nfs/instances /(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash) Export the NFS directories .. code-block:: bash sudo exportfs -ra Make sure the NFS server is running .. code-block:: bash sudo service nfs-kernel-server status If the server is not running, then start it .. code-block:: bash sudo service nfs-kernel-server start Configuring NFS on Compute Node (ADVANCED) ------------------------------------------ Each compute node needs to use the NFS server to hold the instance data .. code-block:: bash sudo apt-get install rpcbind nfs-common mkdir -p /opt/stack/data/instances sudo mount :/nfs/instances /opt/stack/data/instances If you would like to have the NFS directory automatically mounted on reboot, then add the following to ``/etc/fstab`` .. code-block:: bash :/nfs/instances /opt/stack/data/instances nfs auto 0 0 Configuring libvirt to listen on tcp (ADVANCED) ----------------------------------------------- .. NOTE:: By default Nova will use SSH as the transport for live migration. If you have a low-bandwidth connection you can use TCP instead; however, this is generally not recommended. Edit ``/etc/libvirt/libvirtd.conf`` to make sure the following values are set .. code-block:: ini listen_tls = 0 listen_tcp = 1 auth_tcp = "none" Edit ``/etc/default/libvirt-bin`` .. code-block:: ini libvirtd_opts="-d -l" Restart the libvirt service .. code-block:: bash sudo service libvirt-bin restart VNC server configuration ------------------------ The VNC server listening parameter needs to be set to any address so that the server can accept connections from all of the compute nodes. On both the controller and compute node, in ``/etc/nova/nova.conf`` .. code-block:: ini [vnc] server_listen = "0.0.0.0" Alternatively, in devstack's ``local.conf``: ..
code-block:: bash VNCSERVER_LISTEN="0.0.0.0" Environment final checkup ------------------------- If you are willing to make sure everything is in order in your DevStack environment, you can run the Watcher Tempest tests which will validate its API but also that you can perform the typical Watcher workflows. To do so, have a look at the :ref:`Tempest tests ` section which will explain to you how to run them. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/environment.rst0000664000175000017500000001734300000000000024402 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher_development_environment: ========================================= Set up a development environment manually ========================================= This document describes getting the source from watcher `Git repository`_ for development purposes. To install Watcher from packaging, refer instead to Watcher `User Documentation`_. .. _`Git Repository`: https://opendev.org/openstack/watcher .. _`User Documentation`: https://docs.openstack.org/watcher/latest/ Prerequisites ============= This document assumes you are using Ubuntu or Fedora, and that you have the following tools available on your system: - Python_ 2.7 and 3.5 - git_ - setuptools_ - pip_ - msgfmt (part of the gettext package) - virtualenv and virtualenvwrapper_ **Reminder**: If you're successfully using a different platform, or a different version of the above, please document your configuration here! .. _Python: https://www.python.org/ .. _git: https://git-scm.com/ .. _setuptools: https://pypi.org/project/setuptools .. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html Getting the latest code ======================= Make a clone of the code from our ``Git repository``: .. code-block:: bash $ git clone https://opendev.org/openstack/watcher.git When that is complete, you can: .. code-block:: bash $ cd watcher Installing dependencies ======================= Watcher maintains two lists of dependencies:: requirements.txt test-requirements.txt The first is the list of dependencies needed for running Watcher, the second list includes dependencies used for active development and testing of Watcher itself. These dependencies can be installed from PyPi_ using the Python tool pip_. .. _PyPi: https://pypi.org/ .. _pip: https://pypi.org/project/pip However, your system *may* need additional dependencies that ``pip`` (and by extension, PyPi) cannot satisfy. These dependencies should be installed prior to using ``pip``, and the installation method may vary depending on your platform. * Ubuntu 16.04:: $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev * Fedora 24+:: $ sudo dnf install redhat-rpm-config gcc python-devel libxml2-devel * CentOS 7:: $ sudo yum install gcc python-devel libxml2-devel libxslt-devel mariadb-devel PyPi Packages and VirtualEnv ---------------------------- We recommend establishing a virtualenv to run Watcher within. virtualenv limits the Python environment to just what you're installing as dependencies, useful to keep a clean environment for working on Watcher. .. 
code-block:: bash $ mkvirtualenv watcher $ git clone https://opendev.org/openstack/watcher.git # Use 'python setup.py' to link Watcher into Python's site-packages $ cd watcher && python setup.py install # Install the dependencies for running Watcher $ pip install -r ./requirements.txt # Install the dependencies for developing, testing, and running Watcher $ pip install -r ./test-requirements.txt This will create a local virtual environment in the directory ``$WORKON_HOME``. The virtual environment can be disabled using the command: .. code-block:: bash $ deactivate You can re-activate this virtualenv for your current shell using: .. code-block:: bash $ workon watcher For more information on virtual environments, see virtualenv_ and virtualenvwrapper_. .. _virtualenv: https://pypi.org/project/virtualenv/ Verifying Watcher is set up =========================== Once set up, either directly or within a virtualenv, you should be able to invoke Python and import the libraries. If you're using a virtualenv, don't forget to activate it: .. code-block:: bash $ workon watcher You should then be able to ``import watcher`` using Python without issue: .. code-block:: bash $ python -c "import watcher" If you can import watcher without a traceback, you should be ready to develop. Run Watcher tests ================= Watcher provides both :ref:`unit tests ` and :ref:`functional/tempest tests `. Please refer to :doc:`testing` to understand how to run them. Build the Watcher documentation =============================== You can easily build the HTML documentation from ``doc/source`` files by using ``tox``: .. code-block:: bash $ workon watcher (watcher) $ cd watcher (watcher) $ tox -edocs The HTML files are available in the ``doc/build`` directory. Configure the Watcher services ============================== Watcher services require a configuration file. Use tox to generate a sample configuration file that can be used to get started: .. code-block:: bash $ tox -e genconfig $ cp etc/watcher.conf.sample etc/watcher.conf Most of the default configuration should be enough to get you going, but you still need to configure the following sections: - The ``[database]`` section to configure the :ref:`Watcher database ` - The ``[keystone_authtoken]`` section to configure the :ref:`Identity service ` i.e. Keystone - The ``[watcher_messaging]`` section to configure the OpenStack AMQP-based message bus - The ``[watcher_clients_auth]`` section to configure the Keystone client used to access related OpenStack projects So if you need some more details on how to configure one or more of these sections, please do have a look at :doc:`../configuration/configuring` before continuing. Create Watcher SQL database =========================== When initially getting set up, after you've configured which databases to use, you're probably going to need to run the following to put your database schema in place: .. code-block:: bash $ workon watcher (watcher) $ watcher-db-manage create_schema Running Watcher services ======================== To run the Watcher API service, use: .. code-block:: bash $ workon watcher (watcher) $ watcher-api To run the Watcher Decision Engine service, use: .. code-block:: bash $ workon watcher (watcher) $ watcher-decision-engine To run the Watcher Applier service, use: .. code-block:: bash $ workon watcher (watcher) $ watcher-applier The default configuration for these services is available in the ``/etc/watcher`` directory. See :doc:`../configuration/configuring` for details on how Watcher is configured. By default, Watcher is configured with SQL backends.
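Once the services are running, a quick smoke test is to query the API's root endpoint, which lists the available API versions. This is a minimal sketch, assuming the default port ``9322`` used in the endpoint examples in the configuration guide and that version discovery at the root does not require authentication:

.. code-block:: python

    import json
    import urllib.request

    # Default watcher-api port; adjust if you changed it in watcher.conf.
    with urllib.request.urlopen("http://localhost:9322/") as response:
        versions = json.load(response)

    # Prints the version discovery document returned by watcher-api.
    print(versions)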
Interact with Watcher ===================== You can also interact with Watcher through its REST API. There is a Python Watcher client library `python-watcherclient`_ which interacts exclusively through the REST API, and which Watcher itself uses to provide its command-line interface. .. _`python-watcherclient`: https://github.com/openstack/python-watcherclient There is also a Horizon plugin for Watcher, `watcher-dashboard`_, which allows interacting with Watcher through a web-based interface. .. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard Exercising the Watcher Services locally ======================================= If you would like to exercise the Watcher services in isolation within a local virtual environment, you can do this without starting any other OpenStack services. For example, this is useful for rapidly prototyping and debugging interactions over the RPC channel, testing database migrations, and so forth. In the `watcher-tools`_ project, you will find Ansible playbooks and Docker template files to easily play with the Watcher services within a minimal, isolated OpenStack environment (Identity, Message Bus, SQL database, Horizon, ...). .. _`watcher-tools`: https://github.com/b-com/watcher-tools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/index.rst0000664000175000017500000000023400000000000023134 0ustar00zuulzuul00000000000000================== Contribution Guide ================== .. toctree:: :maxdepth: 2 contributing environment devstack testing rally_link ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/notifications.rst0000664000175000017500000000052300000000000024677 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher_notifications: ======================== Notifications in Watcher ======================== .. versioned_notifications:: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6111352 python_watcher-14.0.0/doc/source/contributor/plugin/0000775000175000017500000000000000000000000022572 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/action-plugin.rst0000664000175000017500000001716400000000000026102 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_action_plugin: ================== Build a new action ================== Watcher Applier has an external :ref:`action ` plugin interface which gives anyone the ability to integrate an external :ref:`action ` in order to extend the initial set of actions Watcher provides. This section gives some guidelines on how to implement and integrate custom actions with Watcher.
Creating a new plugin ===================== First of all you have to extend the base :py:class:`BaseAction` class which defines a set of abstract methods and/or properties that you will have to implement: - The :py:attr:`~.BaseAction.schema` is an abstract property that you have to implement. This is the first function to be called by the :ref:`applier ` before any further processing and its role is to validate the input parameters that were provided to it. - The :py:meth:`~.BaseAction.pre_condition` is called before the execution of an action. This method is a hook that can be used to perform some initializations or to make some more advanced validation on its input parameters. If you wish to block the execution based on this factor, you simply have to ``raise`` an exception. - The :py:meth:`~.BaseAction.post_condition` is called after the execution of an action. As this function is called regardless of whether an action succeeded or not, this can prove itself useful to perform cleanup operations. - The :py:meth:`~.BaseAction.execute` is the main component of an action. This is where you should implement the logic of your action. - The :py:meth:`~.BaseAction.revert` allows you to roll back the targeted resource to its original state following a faulty execution. Indeed, this method is called by the workflow engine whenever an action raises an exception. Here is an example showing how you can write a plugin called ``DummyAction``: .. code-block:: python # Filepath = /thirdparty/dummy.py # Import path = thirdparty.dummy import voluptuous from watcher.applier.actions import base class DummyAction(base.BaseAction): @property def schema(self): return voluptuous.Schema({}) def execute(self): # Does nothing pass # Only returning False is considered as a failure def revert(self): # Does nothing pass def pre_condition(self): # No pre-checks are done here pass def post_condition(self): # Nothing done here pass This implementation is the most basic one. So in order to get a better understanding on how to implement a more advanced action, have a look at the :py:class:`~watcher.applier.actions.migration.Migrate` class. Input validation ---------------- As you can see in the previous example, we are using `Voluptuous`_ to validate the input parameters of an action. So if you want to learn more about how to work with `Voluptuous`_, you can have a look at their `documentation`_: .. _Voluptuous: https://github.com/alecthomas/voluptuous .. _documentation: https://github.com/alecthomas/voluptuous/blob/master/README.md Define configuration parameters =============================== At this point, you have a fully functional action. However, in more complex implementation, you may want to define some configuration options so one can tune the action to its needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` class method as followed: .. code-block:: python from oslo_config import cfg class DummyAction(base.BaseAction): # [...] def execute(self): assert self.config.test_opt == 0 @classmethod def get_config_opts(cls): return super( DummyAction, cls).get_config_opts() + [ cfg.StrOpt('test_opt', help="Demo Option.", default=0), # Some more options ... ] The configuration options defined within this class method will be included within the global ``watcher.conf`` configuration file under a section named by convention: ``{namespace}.{plugin_name}``. In our case, the ``watcher.conf`` configuration would have to be modified as followed: .. 
code-block:: ini

   [watcher_actions.dummy]
   # Option used for testing.
   test_opt = test_value

The configuration options you define within this method will then be
injected in each instantiated object via the ``config`` parameter of the
:py:meth:`~.BaseAction.__init__` method.

Abstract Plugin Class
=====================

Here below is the abstract ``BaseAction`` class that every single action
should implement:

.. autoclass:: watcher.applier.actions.base.BaseAction
   :members:
   :special-members: __init__
   :noindex:

   .. py:attribute:: schema

      Defines a Schema that the input parameters shall comply to

      :returns: A schema declaring the input parameters this action should
                be provided along with their respective constraints
                (e.g. type, value range, ...)
      :rtype: :py:class:`voluptuous.Schema` instance

Register a new entry point
==========================

In order for the Watcher Applier to load your new action, the action must
be registered as a named entry point under the ``watcher_actions`` entry
point of your ``setup.py`` file. If you are using pbr_, this entry point
should be placed in your ``setup.cfg`` file.

The name you give to your entry point has to be unique.

Here below is how you would proceed to register ``DummyAction`` using pbr_:

.. code-block:: ini

   [entry_points]
   watcher_actions =
       dummy = thirdparty.dummy:DummyAction

.. _pbr: https://docs.openstack.org/pbr/latest

Using action plugins
====================

The Watcher Applier service will automatically discover any installed
plugins when it is restarted. If a Python package containing a custom
plugin is installed within the same environment as Watcher, Watcher will
automatically make that plugin available for use.

At this point, you can use your new action plugin in your
:ref:`strategy plugin <implement_strategy_plugin>` if you reference it via
the use of the :py:meth:`~.Solution.add_action` method:

.. code-block:: python

   # [...]
   self.solution.add_action(
       action_type="dummy",  # Name of the entry point we registered earlier
       applies_to="",
       input_parameters={})

By doing so, your action will be saved within the Watcher Database, ready
to be processed by the planner for creating an action plan which can then
be executed by the Watcher Applier via its workflow engine.

Finally, remember to add the action to the action weights in
``watcher.conf``; otherwise you will get an error when the action is
referenced in a strategy.

Scheduling of an action plugin
==============================

Watcher provides a basic built-in :ref:`planner ` which is only able to
process the Watcher built-in actions. Therefore, you will either have to
use an existing third-party planner or
:ref:`implement another planner <implement_planner_plugin>` that will be
able to take into account your new action plugin.

Test your new action
====================

In order to test your new action via a manual test or a Tempest test, you
can use the :py:class:`~.Actuator` strategy and pass it one or more actions
to execute. This way, you can isolate your action to see if it works as
expected.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/base-setup.rst0000664000175000017500000000634100000000000025400 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/
.. _plugin-base_setup:

=======================================
Create a third-party plugin for Watcher
=======================================

Watcher provides a plugin architecture which allows anyone to extend the
existing functionalities by implementing third-party plugins. This process
can be cumbersome, so this documentation is here to help you get going as
quickly as possible.

Pre-requisites
==============

We assume that you have set up a working Watcher development environment.
If this is not already the case, you can check out our documentation which
explains how to set up a :ref:`development environment `.

.. _development environment:

Third party project scaffolding
===============================

First off, we need to create the project structure. To do so, we can use
`cookiecutter`_ and the `OpenStack cookiecutter`_ project scaffolder to
generate the skeleton of our project::

   $ virtualenv thirdparty
   $ . thirdparty/bin/activate
   $ pip install cookiecutter
   $ cookiecutter https://github.com/openstack-dev/cookiecutter

The last command will ask you for a lot of information; if you set
``module_name`` and ``repo_name`` as ``thirdparty``, you should end up with
a structure that looks like this::

   $ cd thirdparty
   $ tree .
   .
   ├── babel.cfg
   ├── CONTRIBUTING.rst
   ├── doc
   │   └── source
   │       ├── conf.py
   │       ├── contributing.rst
   │       ├── index.rst
   │       ├── installation.rst
   │       ├── readme.rst
   │       └── usage.rst
   ├── HACKING.rst
   ├── LICENSE
   ├── MANIFEST.in
   ├── README.rst
   ├── requirements.txt
   ├── setup.cfg
   ├── setup.py
   ├── test-requirements.txt
   ├── thirdparty
   │   ├── __init__.py
   │   └── tests
   │       ├── base.py
   │       ├── __init__.py
   │       └── test_thirdparty.py
   └── tox.ini

**Note:** You should add `python-watcher`_ as a dependency in the
requirements.txt file::

   # Watcher-specific requirements
   python-watcher

.. _cookiecutter: https://github.com/audreyr/cookiecutter
.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter
.. _python-watcher: https://pypi.org/project/python-watcher

Implementing a plugin for Watcher
=================================

Now that the project skeleton has been created, you can start the
implementation of your plugin. As of now, you can implement the following
plugins for Watcher:

- A :ref:`goal plugin <implement_goal_plugin>`
- A :ref:`strategy plugin <implement_strategy_plugin>`
- An :ref:`action plugin <implement_action_plugin>`
- A :ref:`planner plugin <implement_planner_plugin>`
- A workflow engine plugin
- A :ref:`cluster data model collector plugin
  <implement_cluster_data_model_collector_plugin>`

If you want to learn more on how to implement them, you can refer to their
dedicated documentation.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/cdmc-plugin.rst0000664000175000017500000002310300000000000025525 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

.. _implement_cluster_data_model_collector_plugin:

========================================
Build a new cluster data model collector
========================================

Watcher Decision Engine has an external cluster data model (CDM) plugin
interface which gives anyone the ability to integrate an external cluster
data model collector (CDMC) in order to extend the initial set of cluster
data model collectors Watcher provides.

This section gives some guidelines on how to implement and integrate custom
cluster data model collectors within Watcher.
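
To make the collector's role concrete, here is roughly how a strategy will
later consume the model your collector builds. This is only a sketch:
``compute_model`` is the property strategies use to reach the compute CDM,
and ``get_all_compute_nodes``/``get_node_instances`` are methods of the
stock compute ``ModelRoot``; a custom model may expose a different API.

.. code-block:: python

   # Sketch: walking a compute cluster data model from within a strategy.
   model = self.compute_model  # kept up to date by the compute CDMC
   for node in model.get_all_compute_nodes().values():
       for instance in model.get_node_instances(node):
           # Inspect node/instance attributes, accumulate statistics, ...
           pass
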
Creating a new plugin ===================== In order to create a new cluster data model collector, you have to: - Extend the :py:class:`~.base.BaseClusterDataModelCollector` class. - Implement its :py:meth:`~.BaseClusterDataModelCollector.execute` abstract method to return your entire cluster data model that this method should build. - Implement its :py:meth:`~.BaseClusterDataModelCollector.audit_scope_handler` abstract property to return your audit scope handler. - Implement its :py:meth:`~.Goal.notification_endpoints` abstract property to return the list of all the :py:class:`~.base.NotificationEndpoint` instances that will be responsible for handling incoming notifications in order to incrementally update your cluster data model. First of all, you have to extend the :class:`~.BaseClusterDataModelCollector` base class which defines the :py:meth:`~.BaseClusterDataModelCollector.execute` abstract method you will have to implement. This method is responsible for building an entire cluster data model. Here is an example showing how you can write a plugin called ``DummyClusterDataModelCollector``: .. code-block:: python # Filepath = /thirdparty/dummy.py # Import path = thirdparty.dummy from watcher.decision_engine.model import model_root from watcher.decision_engine.model.collector import base class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = model_root.ModelRoot() # Do something here... return model @property def audit_scope_handler(self): return None @property def notification_endpoints(self): return [] This implementation is the most basic one. So in order to get a better understanding on how to implement a more advanced cluster data model collector, have a look at the :py:class:`~.NovaClusterDataModelCollector` class. Define a custom model ===================== As you may have noticed in the above example, we are reusing an existing model provided by Watcher. However, this model can be easily customized by implementing a new class that would implement the :py:class:`~.Model` abstract base class. Here below is simple example on how to proceed in implementing a custom Model: .. code-block:: python # Filepath = /thirdparty/dummy.py # Import path = thirdparty.dummy from watcher.decision_engine.model import base as modelbase from watcher.decision_engine.model.collector import base class MyModel(modelbase.Model): def to_string(self): return 'MyModel' class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = MyModel() # Do something here... return model @property def notification_endpoints(self): return [] Here below is the abstract ``Model`` class that every single cluster data model should implement: .. autoclass:: watcher.decision_engine.model.base.Model :members: :special-members: __init__ :noindex: Define configuration parameters =============================== At this point, you have a fully functional cluster data model collector. By default, cluster data model collectors define a ``period`` option (see :py:meth:`~.BaseClusterDataModelCollector.get_config_opts`) that corresponds to the interval of time between each synchronization of the in-memory model. However, in more complex implementation, you may want to define some configuration options so one can tune the cluster data model collector to your needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` class method as followed: .. 
code-block:: python from oslo_config import cfg from watcher.decision_engine.model import model_root from watcher.decision_engine.model.collector import base class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = model_root.ModelRoot() # Do something here... return model @property def audit_scope_handler(self): return None @property def notification_endpoints(self): return [] @classmethod def get_config_opts(cls): return super( DummyClusterDataModelCollector, cls).get_config_opts() + [ cfg.StrOpt('test_opt', help="Demo Option.", default=0), # Some more options ... ] The configuration options defined within this class method will be included within the global ``watcher.conf`` configuration file under a section named by convention: ``{namespace}.{plugin_name}`` (see section :ref:`Register a new entry point `). The namespace for CDMC plugins is ``watcher_cluster_data_model_collectors``, so in our case, the ``watcher.conf`` configuration would have to be modified as followed: .. code-block:: ini [watcher_cluster_data_model_collectors.dummy] # Option used for testing. test_opt = test_value Then, the configuration options you define within this method will then be injected in each instantiated object via the ``config`` parameter of the :py:meth:`~.BaseClusterDataModelCollector.__init__` method. Abstract Plugin Class ===================== Here below is the abstract ``BaseClusterDataModelCollector`` class that every single cluster data model collector should implement: .. autoclass:: watcher.decision_engine.model.collector.base.BaseClusterDataModelCollector :members: :special-members: __init__ :noindex: .. _register_new_cdmc_entrypoint: Register a new entry point ========================== In order for the Watcher Decision Engine to load your new cluster data model collector, the latter must be registered as a named entry point under the ``watcher_cluster_data_model_collectors`` entry point namespace of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique. Here below is how to register ``DummyClusterDataModelCollector`` using pbr_: .. code-block:: ini [entry_points] watcher_cluster_data_model_collectors = dummy = thirdparty.dummy:DummyClusterDataModelCollector .. _pbr: https://docs.openstack.org/pbr/latest/ Add new notification endpoints ============================== At this point, you have a fully functional cluster data model collector. However, this CDMC is only refreshed periodically via a background scheduler. As you may sometimes execute a strategy with a stale CDM due to a high activity on your infrastructure, you can define some notification endpoints that will be responsible for incrementally updating the CDM based on notifications emitted by other services such as Nova. To do so, you can implement and register a new ``DummyEndpoint`` notification endpoint regarding a ``dummy`` event as shown below: .. code-block:: python from watcher.decision_engine.model import model_root from watcher.decision_engine.model.collector import base class DummyNotification(base.NotificationEndpoint): @property def filter_rule(self): return filtering.NotificationFilter( publisher_id=r'.*', event_type=r'^dummy$', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): # Do some CDM modifications here... 
pass class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = model_root.ModelRoot() # Do something here... return model @property def notification_endpoints(self): return [DummyNotification(self)] Note that if the event you are trying to listen to is published by a new service, you may have to also add a new topic Watcher will have to subscribe to in the ``notification_topics`` option of the ``[watcher_decision_engine]`` section. Using cluster data model collector plugins ========================================== The Watcher Decision Engine service will automatically discover any installed plugins when it is restarted. If a Python package containing a custom plugin is installed within the same environment as Watcher, Watcher will automatically make that plugin available for use. At this point, you can use your new cluster data model plugin in your :ref:`strategy plugin ` by using the :py:attr:`~.BaseStrategy.collector_manager` property as followed: .. code-block:: python # [...] dummy_collector = self.collector_manager.get_cluster_model_collector( "dummy") # "dummy" is the name of the entry point we declared earlier dummy_model = dummy_collector.get_latest_cluster_data_model() # Do some stuff with this model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/goal-plugin.rst0000664000175000017500000001726600000000000025556 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_goal_plugin: ================ Build a new goal ================ Watcher Decision Engine has an external :ref:`goal ` plugin interface which gives anyone the ability to integrate an external goal which can be achieved by a :ref:`strategy `. This section gives some guidelines on how to implement and integrate custom goals with Watcher. If you wish to create a third-party package for your plugin, you can refer to our :ref:`documentation for third-party package creation `. Pre-requisites ============== Before using any goal, please make sure that none of the existing goals fit your needs. Indeed, the underlying value of defining a goal is to be able to compare the efficacy of the action plans resulting from the various strategies satisfying the same goal. By doing so, Watcher can assist the administrator in his choices. Create a new plugin =================== In order to create a new goal, you have to: - Extend the :py:class:`~.base.Goal` class. - Implement its :py:meth:`~.Goal.get_name` class method to return the **unique** ID of the new goal you want to create. This unique ID should be the same as the name of :ref:`the entry point you will declare later on `. - Implement its :py:meth:`~.Goal.get_display_name` class method to return the translated display name of the goal you want to create. Note: Do not use a variable to return the translated string so it can be automatically collected by the translation tool. - Implement its :py:meth:`~.Goal.get_translatable_display_name` class method to return the translation key (actually the english display name) of your new goal. The value return should be the same as the string translated in :py:meth:`~.Goal.get_display_name`. - Implement its :py:meth:`~.Goal.get_efficacy_specification` method to return the :ref:`efficacy specification ` for your goal. 
Here is an example showing how you can define a new ``NewGoal`` goal plugin: .. code-block:: python # filepath: thirdparty/new.py # import path: thirdparty.new from watcher._i18n import _ from watcher.decision_engine.goal import base from watcher.decision_engine.goal.efficacy import specs class NewGoal(base.Goal): @classmethod def get_name(cls): return "new_goal" # Will be the name of the entry point @classmethod def get_display_name(cls): return _("New Goal") @classmethod def get_translatable_display_name(cls): return "New Goal" @classmethod def get_efficacy_specification(cls): return specs.Unclassified() As you may have noticed, the :py:meth:`~.Goal.get_efficacy_specification` method returns an :py:meth:`~.Unclassified` instance which is provided by Watcher. This efficacy specification is useful during the development process of your goal as it corresponds to an empty specification. If you want to learn more about what efficacy specifications are used for or to define your own efficacy specification, please refer to the :ref:`related section below `. Abstract Plugin Class ===================== Here below is the abstract :py:class:`~.base.Goal` class: .. autoclass:: watcher.decision_engine.goal.base.Goal :members: :noindex: .. _goal_plugin_add_entrypoint: Add a new entry point ===================== In order for the Watcher Decision Engine to load your new goal, the goal must be registered as a named entry point under the ``watcher_goals`` entry point namespace of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique and should be the same as the value returned by the :py:meth:`~.base.Goal.get_name` class method of your goal. Here below is how you would proceed to register ``NewGoal`` using pbr_: .. code-block:: ini [entry_points] watcher_goals = new_goal = thirdparty.new:NewGoal To get a better understanding on how to implement a more advanced goal, have a look at the :py:class:`watcher.decision_engine.goal.goals.ServerConsolidation` class. .. _pbr: https://docs.openstack.org/pbr/latest .. _implement_efficacy_specification: Implement a customized efficacy specification ============================================= What is it for? --------------- Efficacy specifications define a set of specifications for a given goal. These specifications actually define a list of indicators which are to be used to compute a global efficacy that outlines how well a strategy performed when trying to achieve the goal it is associated to. The idea behind such specification is to give the administrator the possibility to run an audit using different strategies satisfying the same goal and be able to judge how they performed at a glance. Implementation -------------- In order to create a new efficacy specification, you have to: - Extend the :py:class:`~.EfficacySpecification` class. - Implement :py:meth:`~.EfficacySpecification.get_indicators_specifications` by returning a list of :py:class:`~.IndicatorSpecification` instances. * Each :py:class:`~.IndicatorSpecification` instance should actually extend the latter. * Each indicator specification should have a **unique name** which should be a valid Python variable name. * They should implement the :py:attr:`~.EfficacySpecification.schema` abstract property by returning a :py:class:`~.voluptuous.Schema` instance. 
This schema is the contract the strategy will have to comply with when
setting the value associated to the indicator specification within its
solution (see the :ref:`architecture of Watcher ` for more information on
the audit execution workflow).

- Implement the :py:meth:`~.EfficacySpecification.get_global_efficacy`
  method: it should compute the global efficacy for the goal it achieves
  based on the efficacy indicators you just defined.

Here below is an example of an efficacy specification containing one
indicator specification:

.. code-block:: python

    import voluptuous

    from watcher._i18n import _
    from watcher.decision_engine.goal.efficacy import base as efficacy_base
    from watcher.decision_engine.goal.efficacy import indicators
    from watcher.decision_engine.solution import efficacy


    class IndicatorExample(indicators.IndicatorSpecification):
        def __init__(self):
            super(IndicatorExample, self).__init__(
                name="indicator_example",
                description=_("Example of indicator specification."),
                unit=None,
            )

        @property
        def schema(self):
            return voluptuous.Schema(voluptuous.Range(min=0), required=True)


    class UnclassifiedStrategySpecification(efficacy_base.EfficacySpecification):

        def get_indicators_specifications(self):
            return [IndicatorExample()]

        def get_global_efficacy(self, indicators_map):
            return efficacy.Indicator(
                name="global_efficacy_indicator",
                description="Example of global efficacy indicator",
                unit="%",
                # Assuming the indicator holds a ratio, expressed here
                # as a percentage
                value=indicators_map.indicator_example * 100)

To get a better understanding on how to implement an efficacy specification,
have a look at :py:class:`~.ServerConsolidationSpecification`.

Also, if you want to see a concrete example of an indicator specification,
have a look at :py:class:`~.ReleasedComputeNodesCount`.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/index.rst0000664000175000017500000000031400000000000024431 0ustar00zuulzuul00000000000000
============
Plugin Guide
============

.. toctree::
   :maxdepth: 1

   base-setup
   action-plugin
   cdmc-plugin
   goal-plugin
   planner-plugin
   scoring-engine-plugin
   strategy-plugin
   plugins

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/planner-plugin.rst0000664000175000017500000001374100000000000026265 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

.. _implement_planner_plugin:

===================
Build a new planner
===================

Watcher :ref:`Decision Engine ` has an external :ref:`planner ` plugin
interface which gives anyone the ability to integrate an external
:ref:`planner ` in order to extend the initial set of planners Watcher
provides.

This section gives some guidelines on how to implement and integrate custom
planners with Watcher.

.. _Decision Engine: watcher_decision_engine_definition

Creating a new plugin
=====================

First of all you have to extend the base :py:class:`~.BasePlanner` class
which defines an abstract method that you will have to implement. The
:py:meth:`~.BasePlanner.schedule` is the method being called by the
Decision Engine to schedule a given solution (:py:class:`~.BaseSolution`)
into an :ref:`action plan ` by ordering/sequencing an unordered set of
actions contained in the proposed solution (for more details, see
:ref:`definition of a solution `).
Here is an example showing how you can write a planner plugin called
``DummyPlanner``:

.. code-block:: python

    # Filepath = third-party/third_party/dummy.py
    # Import path = third_party.dummy
    from oslo_utils import uuidutils

    from watcher import objects  # needed for objects.ActionPlan below
    from watcher.decision_engine.planner import base


    class DummyPlanner(base.BasePlanner):

        def _create_action_plan(self, context, audit_id):
            action_plan_dict = {
                'uuid': uuidutils.generate_uuid(),
                'audit_id': audit_id,
                'first_action_id': None,
                'state': objects.action_plan.State.RECOMMENDED
            }

            new_action_plan = objects.ActionPlan(context, **action_plan_dict)
            new_action_plan.create(context)
            new_action_plan.save()
            return new_action_plan

        def schedule(self, context, audit_id, solution):
            # Empty action plan
            action_plan = self._create_action_plan(context, audit_id)
            # TODO: You need to create the workflow of actions here
            # and attach it to the action plan
            return action_plan

This implementation is the most basic one, so if you want more advanced
examples, have a look at the implementations of the planners Watcher
already provides, such as :py:class:`~.DefaultPlanner`. A list of all
available planner plugins can be found :ref:`here `.

Define configuration parameters
===============================

At this point, you have a fully functional planner. However, in more
complex implementations, you may want to define some configuration options
so one can tune the planner to their needs. To do so, you can implement the
:py:meth:`~.Loadable.get_config_opts` class method as follows:

.. code-block:: python

    from oslo_config import cfg


    class DummyPlanner(base.BasePlanner):

        # [...]

        def schedule(self, context, audit_uuid, solution):
            assert self.config.test_opt == 0
            # [...]

        @classmethod
        def get_config_opts(cls):
            return super(DummyPlanner, cls).get_config_opts() + [
                # An IntOpt to match the integer default and the
                # assertion above
                cfg.IntOpt('test_opt', help="Demo Option.", default=0),
                # Some more options ...
            ]

The configuration options defined within this class method will be included
within the global ``watcher.conf`` configuration file under a section named
by convention: ``{namespace}.{plugin_name}``. In our case, the
``watcher.conf`` configuration would have to be modified as follows:

.. code-block:: ini

   [watcher_planners.dummy]
   # Option used for testing.
   test_opt = test_value

The configuration options you define within this method will then be
injected in each instantiated object via the ``config`` parameter of the
:py:meth:`~.BasePlanner.__init__` method.

Abstract Plugin Class
=====================

Here below is the abstract ``BasePlanner`` class that every single planner
should implement:

.. autoclass:: watcher.decision_engine.planner.base.BasePlanner
   :members:
   :special-members: __init__
   :noindex:

Register a new entry point
==========================

In order for the Watcher Decision Engine to load your new planner, the
latter must be registered as a new entry point under the
``watcher_planners`` entry point namespace of your ``setup.py`` file. If
you are using pbr_, this entry point should be placed in your ``setup.cfg``
file.

The name you give to your entry point has to be unique.

Here below is how you would proceed to register ``DummyPlanner`` using
pbr_:

.. code-block:: ini

   [entry_points]
   watcher_planners =
       dummy = third_party.dummy:DummyPlanner

.. _pbr: https://docs.openstack.org/pbr/latest

Using planner plugins
=====================

The :ref:`Watcher Decision Engine ` service will automatically discover any
installed plugins when it is started.
This means that if Watcher is already running when you install your plugin,
you will have to restart the related Watcher services. If a Python package
containing a custom plugin is installed within the same environment as
Watcher, Watcher will automatically make that plugin available for use.

At this point, Watcher will use your new planner if you referenced it in
the ``planner`` option under the ``[watcher_planner]`` section of your
``watcher.conf`` configuration file when you started it. For example, if
you want to use the ``dummy`` planner you just installed, you would have to
select it as follows:

.. code-block:: ini

   [watcher_planner]
   planner = dummy

As you may have noticed, only a single planner implementation can be
activated at a time, so make sure it is generic enough to support all your
strategies and actions.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/plugins.rst0000664000175000017500000000255400000000000025013 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

=================
Available Plugins
=================

In this section we present all the plugins that are shipped along with
Watcher. If you want to know which plugins your Watcher services have
access to, you can use the :ref:`Guru Meditation Reports ` to display them.

.. _watcher_goals:

Goals
=====

.. list-plugins:: watcher_goals
   :detailed:

.. _watcher_scoring_engines:

Scoring Engines
===============

.. list-plugins:: watcher_scoring_engines
   :detailed:

.. _watcher_scoring_engine_containers:

Scoring Engine Containers
=========================

.. list-plugins:: watcher_scoring_engine_containers
   :detailed:

.. _watcher_strategies:

Strategies
==========

.. list-plugins:: watcher_strategies
   :detailed:

.. _watcher_actions:

Actions
=======

.. list-plugins:: watcher_actions
   :detailed:

.. _watcher_workflow_engines:

Workflow Engines
================

.. list-plugins:: watcher_workflow_engines
   :detailed:

.. _watcher_planners:

Planners
========

.. list-plugins:: watcher_planners
   :detailed:

Cluster Data Model Collectors
=============================

.. list-plugins:: watcher_cluster_data_model_collectors
   :detailed:

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/scoring-engine-plugin.rst0000664000175000017500000001740700000000000027540 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

.. _implement_scoring_engine_plugin:

==========================
Build a new scoring engine
==========================

Watcher Decision Engine has an external :ref:`scoring engine ` plugin
interface which gives anyone the ability to integrate an external scoring
engine in order to make use of it in a :ref:`strategy `.

This section gives some guidelines on how to implement and integrate custom
scoring engines with Watcher.

If you wish to create a third-party package for your plugin, you can refer
to our :ref:`documentation for third-party package creation
<plugin-base_setup>`.

Pre-requisites
==============

Because scoring engines perform purely mathematical tasks, they typically
do not have any additional dependencies.
Additional requirements might be defined by specific scoring engine implementations. For example, some scoring engines might require to prepare learning data, which has to be loaded during the scoring engine startup. Some other might require some external services to be available (e.g. if the scoring infrastructure is running in the cloud). Create a new scoring engine plugin ================================== In order to create a new scoring engine you have to: - Extend the :py:class:`watcher.decision_engine.scoring.base.ScoringEngine` class - Implement its :py:meth:`~.ScoringEngine.get_name` method to return the **unique** ID of the new scoring engine you want to create. This unique ID should be the same as the name of :ref:`the entry point we will declare later on `. - Implement its :py:meth:`~.ScoringEngine.get_description` method to return the user-friendly description of the implemented scoring engine. It might contain information about algorithm used, learning data etc. - Implement its :py:meth:`~.ScoringEngine.get_metainfo` method to return the machine-friendly metadata about this scoring engine. For example, it could be a JSON formatted text with information about the data model used, its input and output data format, column names, etc. - Implement its :py:meth:`~.ScoringEngine.calculate_score` method to return the result calculated by this scoring engine. Here is an example showing how you can write a plugin called ``NewScorer``: .. code-block:: python # filepath: thirdparty/new.py # import path: thirdparty.new from watcher.decision_engine.scoring import base class NewScorer(base.ScoringEngine): def get_name(self): return 'new_scorer' def get_description(self): return '' def get_metainfo(self): return """{ "feature_columns": [ "column1", "column2", "column3"], "result_columns": [ "value", "probability"] }""" def calculate_score(self, features): return '[12, 0.83]' As you can see in the above example, the :py:meth:`~.ScoringEngine.calculate_score` method returns a string. Both this class and the client (caller) should perform all the necessary serialization or deserialization. (Optional) Create a new scoring engine container plugin ======================================================= Optionally, it's possible to implement a container plugin, which can return a list of scoring engines. This list can be re-evaluated multiple times during the lifecycle of :ref:`Watcher Decision Engine ` and synchronized with :ref:`Watcher Database ` using the ``watcher-sync`` command line tool. Below is an example of a container using some scoring engine implementation that is simply made of a client responsible for communicating with a real scoring engine deployed as a web service on external servers: .. code-block:: python class NewScoringContainer(base.ScoringEngineContainer): @classmethod def get_scoring_engine_list(self): return [ RemoteScoringEngine( name='scoring_engine1', description='Some remote Scoring Engine 1', remote_url='http://engine1.example.com/score'), RemoteScoringEngine( name='scoring_engine2', description='Some remote Scoring Engine 2', remote_url='http://engine2.example.com/score'), ] Abstract Plugin Class ===================== Here below is the abstract :py:class:`watcher.decision_engine.scoring.base.ScoringEngine` class: .. autoclass:: watcher.decision_engine.scoring.base.ScoringEngine :members: :special-members: __init__ :noindex: Abstract Plugin Container Class =============================== Here below is the abstract :py:class:`~.ScoringContainer` class: .. 
autoclass:: watcher.decision_engine.scoring.base.ScoringEngineContainer
   :members:
   :special-members: __init__
   :noindex:

.. _scoring_engine_plugin_add_entrypoint:

Add a new entry point
=====================

In order for the Watcher Decision Engine to load your new scoring engine,
it must be registered as a named entry point under the
``watcher_scoring_engines`` entry point of your ``setup.py`` file. If you
are using pbr_, this entry point should be placed in your ``setup.cfg``
file.

The name you give to your entry point has to be unique and should be the
same as the value returned by the :py:meth:`~.ScoringEngine.get_name`
method of your strategy.

Here below is how you would proceed to register ``NewScorer`` using pbr_:

.. code-block:: ini

   [entry_points]
   watcher_scoring_engines =
       new_scorer = thirdparty.new:NewScorer

To get a better understanding on how to implement a more advanced scoring
engine, have a look at the :py:class:`~.DummyScorer` class. This
implementation is not really using machine learning, but other than that it
contains all the pieces which the "real" implementation would have.

In addition, for some use cases there is a need to register a list
(possibly dynamic, depending on the implementation and configuration) of
scoring engines in a single plugin, so there is no need to restart the
:ref:`Watcher Decision Engine ` every time such a list changes. For these
cases, an additional ``watcher_scoring_engine_containers`` entry point can
be used.

For an example of how to use scoring engine containers, please have a look
at the :py:class:`~.DummyScoringContainer` and the way it is configured in
``setup.cfg``. For new containers it could be done like this:

.. code-block:: ini

   [entry_points]
   watcher_scoring_engine_containers =
       new_scoring_container = thirdparty.new:NewContainer

.. _pbr: https://docs.openstack.org/pbr/latest/

Using scoring engine plugins
============================

The Watcher Decision Engine service will automatically discover any
installed plugins when it is restarted. If a Python package containing a
custom plugin is installed within the same environment as Watcher, Watcher
will automatically make that plugin available for use.

At this point, Watcher will scan and register inside the
:ref:`Watcher Database ` all the scoring engines you implemented upon
restarting the :ref:`Watcher Decision Engine `.

In addition, the ``watcher-sync`` tool can be used to trigger
:ref:`Watcher Database ` synchronization. This might be used for "dynamic"
scoring containers, which can return different scoring engines based on
some external configuration (if they support that).

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/plugin/strategy-plugin.rst0000664000175000017500000002715000000000000026467 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

.. _implement_strategy_plugin:

=================================
Build a new optimization strategy
=================================

Watcher Decision Engine has an external :ref:`strategy ` plugin interface
which gives anyone the ability to integrate an external strategy in order
to make use of placement algorithms.

This section gives some guidelines on how to implement and integrate custom
strategies with Watcher.
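
Before writing a new strategy, it can help to see what is already registered
in your deployment. Assuming ``python-watcherclient`` is installed and
configured, commands along the following lines should list the shipped
goals and strategies (illustrative; ``server_consolidation`` is just an
example goal name, and exact options/output vary by release):

.. code-block:: bash

   $ watcher goal list
   $ watcher strategy list --goal server_consolidation
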
If you wish to create a third-party package for your plugin, you can refer
to our :ref:`documentation for third-party package creation
<plugin-base_setup>`.

Pre-requisites
==============

Before using any strategy, you should make sure you have your Telemetry
service configured so that it provides all the metrics you need to be able
to use your strategy.

Create a new strategy plugin
============================

In order to create a new strategy, you have to:

- Extend the :py:class:`~.UnclassifiedStrategy` class.
- Implement its :py:meth:`~.BaseStrategy.get_name` class method to return
  the **unique** ID of the new strategy you want to create. This unique ID
  should be the same as the name of :ref:`the entry point we will declare
  later on <strategy_plugin_add_entrypoint>`.
- Implement its :py:meth:`~.BaseStrategy.get_display_name` class method to
  return the translated display name of the strategy you want to create.
  Note: Do not use a variable to return the translated string so it can be
  automatically collected by the translation tool.
- Implement its :py:meth:`~.BaseStrategy.get_translatable_display_name`
  class method to return the translation key (actually the English display
  name) of your new strategy. The value returned should be the same as the
  string translated in :py:meth:`~.BaseStrategy.get_display_name`.
- Implement its :py:meth:`~.BaseStrategy.execute` method to return the
  solution you computed within your strategy.

Here is an example showing how you can write a plugin called
``NewStrategy``:

.. code-block:: python

    # filepath: thirdparty/new.py
    # import path: thirdparty.new
    from watcher._i18n import _
    from watcher.decision_engine.strategy.strategies import base


    class NewStrategy(base.UnclassifiedStrategy):

        def __init__(self, osc=None):
            super(NewStrategy, self).__init__(osc)

        def execute(self, original_model):
            parameters = {}  # input parameters of the action, if any
            self.solution.add_action(action_type="nop",
                                     input_parameters=parameters)
            # Do some more stuff here ...
            return self.solution

        @classmethod
        def get_name(cls):
            return "new_strategy"

        @classmethod
        def get_display_name(cls):
            return _("New strategy")

        @classmethod
        def get_translatable_display_name(cls):
            return "New strategy"

As you can see in the above example, the :py:meth:`~.BaseStrategy.execute`
method returns a :py:class:`~.BaseSolution` instance as required. This
solution is what wraps the abstract set of actions the strategy recommends
to you. This solution is then processed by a :ref:`planner ` to produce an
action plan which contains the sequenced flow of actions to be executed by
the :ref:`Watcher Applier `. This solution also contains the various
:ref:`efficacy indicators ` alongside its computed
:ref:`global efficacy `.

Please note that Watcher instantiates your strategy using the same
constructor signature as :py:class:`~.BaseStrategy`. Therefore, you should
ensure that your ``__init__`` signature is identical to the
:py:class:`~.BaseStrategy` one.

Strategy efficacy
=================

As stated before, the ``NewStrategy`` class extends a class called
:py:class:`~.UnclassifiedStrategy`. This class actually implements a set of
abstract methods which are defined within the :py:class:`~.BaseStrategy`
parent class.

One thing this :py:class:`~.UnclassifiedStrategy` class defines is that our
``NewStrategy`` achieves the ``unclassified`` goal. This goal is a peculiar
one as it does not contain any indicator nor does it calculate a global
efficacy.
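
By contrast, when your strategy targets a goal that does define efficacy
indicators, it is expected to report their values on its solution before
returning it. A minimal sketch follows; ``set_efficacy_indicators`` is the
helper exposed by the solution object, and the keyword names below are only
examples that must match the indicators declared by your goal's efficacy
specification:

.. code-block:: python

   # Somewhere at the end of your strategy's execute() implementation.
   # The indicator names are examples and must match your goal's
   # efficacy specification.
   self.solution.set_efficacy_indicators(
       instance_migrations_count=len(self.solution.actions),
       released_compute_nodes_count=0,  # computed by your algorithm
   )
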
An unclassified goal proves itself quite useful during the development of a
new strategy for which the goal has yet to be defined, or in case a
:ref:`new goal <implement_goal_plugin>` has yet to be implemented.

Define Strategy Parameters
==========================

For each newly added strategy, you can define a parameters spec so that an
operator can input strategy parameters when creating an audit, in order to
control the behavior of :py:meth:`~.BaseStrategy.execute`. This is useful
to define thresholds for your strategy and tune them at runtime.

To define parameters, simply implement :py:meth:`~.BaseStrategy.get_schema`
to return a parameters spec in `jsonschema <https://json-schema.org>`_
format. It is strongly encouraged to provide a default value for each
parameter; otherwise referencing a parameter will fail if the operator
specifies no parameters.

Here is an example showing how you can define 2 parameters for
``DummyStrategy``:

.. code-block:: python

    class DummyStrategy(base.DummyBaseStrategy):

        @classmethod
        def get_schema(cls):
            return {
                "properties": {
                    "para1": {
                        "description": "number parameter example",
                        "type": "number",
                        "default": 3.2,
                        "minimum": 1.0,
                        "maximum": 10.2,
                    },
                    "para2": {
                        "description": "string parameter example",
                        "type": "string",
                        "default": "hello",
                    },
                },
            }

You can reference parameters in :py:meth:`~.BaseStrategy.execute`:

.. code-block:: python

    class DummyStrategy(base.DummyBaseStrategy):

        def execute(self):
            para1 = self.input_parameters.para1
            para2 = self.input_parameters.para2

            if para1 > 5:
                ...

An operator can specify parameters with the following command:

.. code:: bash

   $ watcher audit create -a <your_audit_template> -p para1=6.0 -p para2=hi

Please check the user guide for details.

Abstract Plugin Class
=====================

Here below is the abstract :py:class:`~.BaseStrategy` class:

.. autoclass:: watcher.decision_engine.strategy.strategies.base.BaseStrategy
   :members:
   :special-members: __init__
   :noindex:

.. _strategy_plugin_add_entrypoint:

Add a new entry point
=====================

In order for the Watcher Decision Engine to load your new strategy, the
strategy must be registered as a named entry point under the
``watcher_strategies`` entry point of your ``setup.py`` file. If you are
using pbr_, this entry point should be placed in your ``setup.cfg`` file.

The name you give to your entry point has to be unique and should be the
same as the value returned by the :py:meth:`~.BaseStrategy.get_name` class
method of your strategy.

Here below is how you would proceed to register ``NewStrategy`` using
pbr_:

.. code-block:: ini

   [entry_points]
   watcher_strategies =
       new_strategy = thirdparty.new:NewStrategy

To get a better understanding on how to implement a more advanced strategy,
have a look at the :py:class:`~.BasicConsolidation` class.

.. _pbr: https://docs.openstack.org/pbr/latest

Using strategy plugins
======================

The Watcher Decision Engine service will automatically discover any
installed plugins when it is restarted. If a Python package containing a
custom plugin is installed within the same environment as Watcher, Watcher
will automatically make that plugin available for use.

At this point, Watcher will scan and register inside the
:ref:`Watcher Database ` all the strategies (alongside the goals they
should satisfy) you implemented upon restarting the
:ref:`Watcher Decision Engine `.

You should take care when installing strategy plugins. By their very
nature, there are no guarantees that utilizing them as is will be
supported, as they may require a set of metrics which is not yet available
within the Telemetry service.
In such a case, please do make sure that you first check/configure the latter so your new strategy can be fully functional. Querying metrics ---------------- A large set of metrics, generated by OpenStack modules, can be used in your strategy implementation. To collect these metrics, Watcher provides a `DataSourceManager`_ for two data sources which are `Ceilometer`_ (with `Gnocchi`_ as API) and `Monasca`_. If you wish to query metrics from a different data source, you can implement your own and use it via DataSourceManager from within your new strategy. Indeed, strategies in Watcher have the cluster data models decoupled from the data sources which means that you may keep the former while changing the latter. The recommended way for you to support a new data source is to implement a new helper that would encapsulate within separate methods the queries you need to perform. To then use it, you would just have to add it to appropriate watcher_strategies.* section in config file. If you want to use Ceilometer but with your own metrics database backend, please refer to the `Ceilometer developer guide`_. The list of the available Ceilometer backends is located here_. The `Ceilosca`_ project is a good example of how to create your own pluggable backend. Moreover, if your strategy requires new metrics not covered by Ceilometer, you can add them through a `Ceilometer plugin`_. .. _`DataSourceManager`: https://github.com/openstack/watcher/blob/master/watcher/datasource/manager.py .. _`Ceilometer developer guide`: https://docs.openstack.org/ceilometer/latest/contributor/architecture.html#storing-accessing-the-data .. _`Ceilometer`: https://docs.openstack.org/ceilometer/latest .. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md .. _`here`: https://docs.openstack.org/ceilometer/latest/contributor/install/dbreco.html#choosing-a-database-backend .. _`Ceilometer plugin`: https://docs.openstack.org/ceilometer/latest/contributor/plugins.html .. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py .. _`Gnocchi`: https://gnocchi.xyz/ Read usage metrics using the Watcher Datasource Helper ------------------------------------------------------ The following code snippet shows how datasource_backend is defined: .. code-block:: py from watcher.datasource import manager as ds_manager @property def datasource_backend(self): if not self._datasource_backend: # Load the global preferred datasources order but override it # if the strategy has a specific datasources config datasources = CONF.watcher_datasources if self.config.datasources: datasources = self.config self._datasource_backend = ds_manager.DataSourceManager( config=datasources, osc=self.osc ).get_backend(self.DATASOURCE_METRICS) return self._datasource_backend Using that you can now query the values for that specific metric: .. code-block:: py avg_meter = self.datasource_backend.statistic_aggregation( instance.uuid, 'instance_cpu_usage', self.periods['instance'], self.granularity, aggregation=self.aggregation_method['instance']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/rally_link.rst0000664000175000017500000000005400000000000024165 0ustar00zuulzuul00000000000000.. 
include:: ../../../rally-jobs/README.rst

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/contributor/testing.rst0000664000175000017500000000257400000000000023511 0ustar00zuulzuul00000000000000
.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

=================
Developer Testing
=================

.. _unit_tests:

Unit tests
==========

All unit tests should be run using `tox`_. Before running the unit tests,
you should download the latest `watcher`_ from GitHub. To run the same unit
tests that are executed on `Gerrit`_, which include ``py36``, ``py37`` and
``pep8``, you can issue the following commands::

   $ git clone https://opendev.org/openstack/watcher
   $ cd watcher
   $ pip install tox
   $ tox

If you only want to run one of the aforementioned, you can then issue one
of the following::

   $ tox -e py36
   $ tox -e py37
   $ tox -e pep8

.. _tox: https://tox.readthedocs.org/
.. _watcher: https://opendev.org/openstack/watcher
.. _Gerrit: https://review.opendev.org/

If you only want to run specific unit test code and don't want to waste
time waiting for all unit tests to execute, you can append ``--`` followed
by a regex string::

   $ tox -e py37 -- watcher.tests.api

.. _tempest_tests:

Tempest tests
=============

Tempest tests for Watcher have been migrated to the external repo
`watcher-tempest-plugin`_.

.. _watcher-tempest-plugin: https://opendev.org/openstack/watcher-tempest-plugin

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/doc/source/datasources/0000775000175000017500000000000000000000000021237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/datasources/grafana.rst0000664000175000017500000004223000000000000023371 0ustar00zuulzuul00000000000000
==================
Grafana datasource
==================

Synopsis
--------

Grafana can interface with many different types of storage backends that
Grafana calls datasources_. Since the term *datasource* overlaps with
definitions used in Watcher and causes significant confusion, these
**datasources are called projects instead**. Some examples of supported
projects are InfluxDB or Elasticsearch, while others might be more
familiar, such as Monasca or Gnocchi.

The Grafana datasource provides the functionality to retrieve metrics from
Grafana for different projects. This functionality is achieved by using the
proxy interface exposed in Grafana to communicate with Grafana projects
directly.

Background
**********

Since queries to retrieve metrics from Grafana are proxied to the project,
the format of these queries will change significantly depending on the type
of project. The structure of the projects themselves will also change
significantly as they are structured by users and administrators. For
instance, some developers might decide to store metrics about compute_nodes
in MySQL and use the UUID as primary key, while others use InfluxDB and use
the hostname as primary key. Furthermore, datasources in Watcher must
return metrics in the specific units strictly defined in the baseclass_;
depending on how the units are stored in the projects, values might require
conversion before being returned.
The flexible configuration parameters of the Grafana datasource allow you
to specify exactly how the deployment is configured, which enables the
correct retrieval of metrics in the correct units.

.. _datasources: https://grafana.com/plugins?type=datasource
.. _baseclass: https://github.com/openstack/watcher/blob/584eeefdc8/watcher/datasources/base.py

Requirements
------------

The use of the Grafana datasource requires a reachable Grafana endpoint and
an authentication token for access to the desired projects. The projects
behind Grafana will need to contain the metrics for compute_nodes_ or
instances_, and these need to be identifiable by an attribute of the
Watcher datamodel_, for instance hostname or UUID.

.. _compute_nodes: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/element/node.py
.. _instances: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/element/instance.py
.. _datamodel: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/element

Limitations
***********

* Only the InfluxDB project is currently supported [#f1]_.
* All metrics must be retrieved from the same Grafana endpoint (same URL).
* All metrics must be retrieved with the same authentication token.

.. [#f1] A base class for projects is available_ and easily extensible.

.. _available: https://review.opendev.org/#/c/649341/24/watcher/datasources/grafana_translator/base.py

Configuration
-------------

Several steps are required in order to use the Grafana datasource. Most
steps are related to configuring Watcher to match the deployed Grafana
setup, such as the queries proxied to the project or the type of project
for any given metric. Most of the configuration can either be supplied via
the traditional configuration file or in a `special yaml`_ file.

.. _special yaml: https://specs.openstack.org/openstack/watcher-specs/specs/train/approved/file-based-metricmap.html

token
*****

The first step is to generate an access token with access to the required
projects. This can be done from the api_ or from the web interface_. Tokens
generated from the web interface will have the same access to projects as
the user that created them, while using the CLI allows you to generate a
key for a specific role. The token will only be displayed once, so store it
safely. This token will go into the configuration file later, and this
parameter can not be placed in the yaml.

.. _api: https://grafana.com/docs/http_api/auth/#create-api-key
.. _interface: https://grafana.com/docs/http_api/auth/#create-api-token

base_url
********

The next step is supplying the base URL of the Grafana endpoint. The base
URL parameter needs to specify the HTTP protocol, and the use of plain-text
HTTP is strongly discouraged due to the transmission of the access token.
Additionally, the path to the proxy interface needs to be supplied as well,
in case Grafana is placed in a subdirectory of the web server. An example
would be: ``https://mygrafana.org/api/datasource/proxy/``, where
``/api/datasource/proxy`` is the default path without any subdirectories.
Likewise, this parameter can not be placed in the yaml.

To prevent many errors from occurring and potentially filling the log
files, it is advised to specify the desired datasource in the
configuration, as this prevents the datasource manager from having to
iterate and try possible datasources with the launch of each audit. To do
this, specify ``datasources`` in the ``[watcher_datasources]`` group.
The current configuration that is required to be placed in the traditional
configuration file would look like the following:

.. code-block:: shell

    [grafana_client]
    token = 0JLbF0oB4R3Q2Fl337Gh4Df5VN12D3adBE3f==
    base_url = https://mygrafana.org/api/datasources/proxy

    [watcher_datasources]
    datasources = grafana

metric parameters
*****************

The five remaining configuration parameters can be placed either in the
traditional configuration file or in the yaml. It is not advised to mix and
match but, in case it does occur, the yaml overrides the settings from the
traditional configuration file. All five of these parameters are dictionaries
mapping specific metrics to a configuration value. For instance,
``project_id_map`` specifies the project id in Grafana to be used for a given
metric. The parameters are named as follows:

* project_id_map
* database_map
* translator_map
* attribute_map
* query_map

These five parameters are named differently if configured using the yaml
configuration file. The parameters are named as follows, in the same order as
the list for the traditional configuration file:

* project
* db
* translator
* attribute
* query

When specified in the yaml, the parameters are no longer dictionaries;
instead, each parameter needs to be defined per metric as a sub-parameter.
Examples of these parameters configured for both the yaml and the traditional
configuration are given at the end of this document.

project_id
**********

The project ids can only be determined by someone with the admin role in
Grafana, as that role is required to open the list of projects. The list of
projects can be found at ``/datasources`` in the web interface but,
unfortunately, it does not immediately display the project id. To display the
id, hover the mouse over a project and the url will show its project id, for
example ``/datasources/edit/7563``. Alternatively, the entire list of
projects can be retrieved using the `REST api`_. To easily make requests to
the REST api, a tool such as Postman can be used.

.. _REST api: https://grafana.com/docs/http_api/data_source/#get-all-datasources

database
********

The database is the parameter for the schema / database that is actually
defined in the project. For instance, if the project is based on MySQL, this
is where the name of the schema used within the MySQL server is specified.
For many different projects it is possible to list all the databases
currently available. Tools like Postman can be used to list all the available
databases per project. For InfluxDB-based projects this is done with the
following path and query; be sure to construct these requests so that the
header contains the authorization token:

.. code-block:: shell

    https://URL.DOMAIN/api/datasources/proxy/PROJECT_ID/query?q=SHOW%20DATABASES
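Any HTTP client works for this as long as the ``Authorization`` header
carries the token; a minimal sketch with ``curl``, assuming a Bearer token
(the URL, project id and token are placeholders):

.. code-block:: shell

    # List the databases of one project through the Grafana proxy interface.
    curl -H "Authorization: Bearer eyJrIjoi..." \
        "https://URL.DOMAIN/api/datasources/proxy/PROJECT_ID/query?q=SHOW%20DATABASES"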
translator
**********

Each translator is for a specific type of project and has a uniquely
identifiable name, and the baseclass allows easily supporting new types of
projects such as Elasticsearch or Prometheus. Currently only InfluxDB-based
projects are supported; as a result, the only valid value for this parameter
is ``influxdb``.

attribute
*********

The attribute parameter specifies which attribute to use from Watcher's data
model in order to construct the query. The available attributes differ per
type of object in the data model, but the following table shows the
attributes for ComputeNodes, Instances and IronicNodes.

+-----------------+-----------------+--------------------+
| ComputeNode     | Instance        | IronicNode         |
+=================+=================+====================+
| uuid            | uuid            | uuid               |
+-----------------+-----------------+--------------------+
| id              | name            | human_id           |
+-----------------+-----------------+--------------------+
| hostname        | project_id      | power_state        |
+-----------------+-----------------+--------------------+
| status          | watcher_exclude | maintenance        |
+-----------------+-----------------+--------------------+
| disabled_reason | locked          | maintenance_reason |
+-----------------+-----------------+--------------------+
| state           | metadata        | extra              |
+-----------------+-----------------+--------------------+
| memory          | state           |                    |
+-----------------+-----------------+--------------------+
| disk            | memory          |                    |
+-----------------+-----------------+--------------------+
| disk_capacity   | disk            |                    |
+-----------------+-----------------+--------------------+
| vcpus           | disk_capacity   |                    |
+-----------------+-----------------+--------------------+
|                 | vcpus           |                    |
+-----------------+-----------------+--------------------+

Many, if not all, of these attributes map to attributes of the objects that
are fetched from clients such as Nova. To see how these attributes are put
into the data model, the following source files can be analyzed for Nova_ and
Ironic_.

.. _Nova: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/collector/nova.py#L304
.. _Ironic: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/collector/ironic.py#L85

query
*****

The query is the single most important parameter: it will be passed to the
project and should return the desired metric for the specific host, with the
value in the correct unit. The units for all available metrics are documented
in the `datasource baseclass`_. This might mean the query specified in this
parameter is responsible for converting the unit. The following query
demonstrates how such a conversion could be achieved, converting bytes into
megabytes:

.. code-block:: shell

    SELECT value/1000000 FROM memory...

Queries will be formatted using the ``.format`` string method within Python.
This format currently has five attributes exposed to it, labeled ``{0}``
through ``{4}``. Every occurrence of these characters within the string will
be replaced with the specific attribute.

{0} is the aggregate, typically ``mean``, ``min`` or ``max``, but ``count``
is also supported.

{1} is the attribute as specified in the attribute parameter.

{2} is the period of time to aggregate data over, in seconds.

{3} is the granularity or the interval between data points, in seconds.

{4} is translator specific and, in the case of InfluxDB, it will be used for
retention periods.

**InfluxDB**

Constructing the queries, or rather anticipating how the results should look
in order to be correctly interpreted by Watcher, can be a challenge. The
following json example demonstrates what the result should look like and the
query used to get this result.

.. code-block:: json

    {
      "results": [
        {
          "statement_id": 0,
          "series": [
            {
              "name": "vmstats",
              "tags": {
                "host": "autoserver01"
              },
              "columns": [
                "time",
                "mean"
              ],
              "values": [
                [
                  1560848284284,
                  7680000
                ]
              ]
            }
          ]
        }
      ]
    }

.. code-block:: shell

    SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND
    "type_instance" =~ /^mem$/ AND time >= now() - {2}s GROUP BY host

.. _datasource baseclass: https://opendev.org/openstack/watcher/src/branch/master/watcher/datasources/base.py
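Because these templates are plain ``str.format`` strings, the substitution
can be previewed by hand. For example, with the aggregate ``mean``, the
attribute value ``compute01``, a period of 600 seconds and the ``one_week``
retention period (all placeholder values), a simplified template such as
``SELECT {0}("{0}_value") FROM {4}.memory WHERE ("host" =~ /^{1}$/) AND time >= now()-{2}s``
would expand to the query below; note that the granularity ``{3}`` is not
used by this particular template:

.. code-block:: shell

    SELECT mean("mean_value") FROM one_week.memory WHERE ("host" =~ /^compute01$/) AND time >= now()-600s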
Example configuration
---------------------

The example configurations show how to achieve the entire configuration
either in the config file alone or using a combination of the regular file
and yaml. Using yaml to define all the parameters for each metric is
recommended, since it has better human readability and supports multi-line
option definitions.

Configuration file
******************

**It is important to note that the line breaks shown in between assignments
of parameters cannot be used in the actual configuration; they are only here
for readability reasons.**

.. code-block:: shell

    [grafana_client]

    # Authentication token to gain access (string value)
    # Note: This option can be changed without restarting.
    token = eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==

    # first part of the url (including https:// or http://) up until project id
    # part. Example: https://secure.org/api/datasource/proxy/ (string value)
    # Note: This option can be changed without restarting.
    base_url = https://monitoring-grafana.com/api/datasources/proxy/

    # Project id as in url (integer value)
    # Note: This option can be changed without restarting.
    project_id_map = host_cpu_usage:1337,host_ram_usage:6969,
        instance_cpu_usage:1337,instance_ram_usage:9696

    # Mapping of grafana databases to datasource metrics. (dict value)
    # Note: This option can be changed without restarting.
    database_map = host_cpu_usage:monit_production,
        host_ram_usage:monit_production,instance_cpu_usage:prod_cloud,
        instance_ram_usage:prod_cloud

    translator_map = host_cpu_usage:influxdb,host_ram_usage:influxdb,
        instance_cpu_usage:influxdb,instance_ram_usage:influxdb

    attribute_map = host_cpu_usage:hostname,host_ram_usage:hostname,
        instance_cpu_usage:name,instance_ram_usage:name

    query_map = host_cpu_usage:SELECT 100-{0}("{0}_value") FROM {4}.cpu WHERE ("host" =~ /^{1}$/ AND "type_instance" =~/^idle$/ AND time > now()-{2}s),
        host_ram_usage:SELECT {0}("{0}_value")/1000000 FROM {4}.memory WHERE ("host" =~ /^{1}$/) AND "type_instance" =~ /^used$/ AND time >= now()-{2}s GROUP BY "type_instance",
        instance_cpu_usage:SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^cpu$/ AND time >= now() - {2}s GROUP BY host,
        instance_ram_usage:SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^mem$/ AND time >= now() - {2}s GROUP BY host

    [grafana_translators]
    retention_periods = one_week:10080,one_month:302400,five_years:525600

    [watcher_datasources]
    datasources = grafana

yaml
****

When using the yaml configuration file, some parameters still need to be
defined in the regular configuration file, such as the path to the yaml file.
These parameters are detailed below:

.. code-block:: shell

    [grafana_client]
    token = eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==
    base_url = https://monitoring-grafana.com/api/datasources/proxy/

    [watcher_datasources]
    datasources = grafana

    [watcher_decision_engine]
    metric_map_path = /etc/watcher/metric_map.yaml

Using the yaml allows defining the parameters per metric more effectively and
with greater human readability, due to the availability of multi-line
options. These multi-line options are demonstrated in the query parameters.
.. code-block:: yaml

    grafana:
      host_cpu_usage:
        project: 1337
        db: monit_production
        translator: influxdb
        attribute: hostname
        query: >
          SELECT 100-{0}("{0}_value") FROM {4}.cpu WHERE ("host" =~ /^{1}$/
          AND "type_instance" =~/^idle$/ AND time > now()-{2}s)
      host_ram_usage:
        project: 6969
        db: monit_production
        translator: influxdb
        attribute: hostname
        query: >
          SELECT {0}("{0}_value")/1000000 FROM {4}.memory WHERE ("host" =~
          /^{1}$/) AND "type_instance" =~ /^used$/ AND time >= now()-{2}s
          GROUP BY "type_instance"
      instance_cpu_usage:
        project: 1337
        db: prod_cloud
        translator: influxdb
        attribute: name
        query: >
          SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND
          "type_instance" =~ /^cpu$/ AND time >= now() - {2}s GROUP BY host
      instance_ram_usage:
        project: 9696
        db: prod_cloud
        translator: influxdb
        attribute: name
        query: >
          SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND
          "type_instance" =~ /^mem$/ AND time >= now() - {2}s GROUP BY host

External Links
--------------

- `List of Grafana datasources <https://grafana.com/plugins?type=datasource>`_

python_watcher-14.0.0/doc/source/datasources/index.rst

Datasources
===========

.. toctree::
   :glob:
   :maxdepth: 1

   ./*

python_watcher-14.0.0/doc/source/datasources/prometheus.rst

=====================
Prometheus datasource
=====================

Synopsis
--------

The Prometheus datasource allows Watcher to use a Prometheus server as the
source for collected metrics used by the Watcher decision engine. At a
minimum, deployers must configure the ``host`` and ``port`` at which the
Prometheus server is listening.

Requirements
------------

It is required that Prometheus metrics contain a label to identify the
hostname of the exporter from which the metric was collected. This is used to
match against the Watcher cluster model ``ComputeNode.hostname``. The default
for this label is ``fqdn``, and in the Prometheus scrape configs it would
look like:

.. code-block:: yaml

    scrape_configs:
      - job_name: node
        static_configs:
          - targets: ['10.1.2.3:9100']
            labels:
              fqdn: "testbox.controlplane.domain"

This default can be overridden when a deployer uses a different label to
identify the exporter host (for example ``hostname`` or ``host``, or any
other label, as long as it identifies the host). Internally this label is
used in creating a ``fqdn_instance_map``, mapping the fqdn to the Prometheus
instance label associated with each exporter. The keys of the resulting
fqdn_instance_map are expected to match the ``ComputeNode.hostname`` used in
the Watcher decision engine cluster model. An example ``fqdn_instance_map``
is the following:

.. code-block::

    {
        'ena.controlplane.domain': '10.1.2.1:9100',
        'dio.controlplane.domain': '10.1.2.2:9100',
        'tria.controlplane.domain': '10.1.2.3:9100'
    }

For instance metrics, it is required that Prometheus contains a label with
the uuid of the OpenStack instance in each relevant metric. By default, the
datasource will look for the label ``resource``. The ``instance_uuid_label``
config option in watcher.conf allows deployers to override this default to
any other label name that stores the ``uuid``.
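Whether the expected label is actually present can be checked directly
against the Prometheus HTTP query API before configuring Watcher; a minimal
sketch with ``curl``, in which the server address, metric name and label
value are placeholders:

.. code-block:: shell

    curl 'http://10.1.2.3:9090/api/v1/query' \
        --data-urlencode 'query=node_memory_MemFree_bytes{fqdn="testbox.controlplane.domain"}'

    # A non-empty "result" list in the JSON response means metrics carrying
    # the fqdn label are being scraped for that host.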
Limitations
-----------

The current implementation doesn't support the ``statistic_series`` function
of the Watcher ``DataSourceBase`` class. It is expected that the
``statistic_aggregation`` function (which is implemented) is sufficient for
providing the **current** state of the managed resources in the cluster. The
``statistic_aggregation`` function defaults to querying back 300 seconds,
starting from the present time (the time period is a function parameter and
can be set to a value as required). Implementing ``statistic_series`` can
always be revisited if the requisite interest and work cycles are volunteered
by the interested parties.

One further note concerns a limitation in the implemented
``statistic_aggregation`` function. This function is defined with a
``granularity`` parameter, to be used when querying any of the Watcher
``DataSourceBase`` metric providers. In the case of Prometheus, we do not
fetch and then process individual metrics across the specified time period.
Instead, we use the PromQL querying operators and functions, so that the
server itself will process the request across the specified parameters and
then return the result. So the ``granularity`` parameter is redundant and
remains unused in the Prometheus implementation of
``statistic_aggregation``. The granularity of the data fetched by the
Prometheus server is specified in its configuration as the server
``scrape_interval`` (current default 15 seconds).

Configuration
-------------

A deployer must set the ``datasources`` parameter to include ``prometheus``
under the ``[watcher_datasources]`` section of watcher.conf (or add
``prometheus`` in datasources for a specific strategy if preferred, e.g.
under the ``[watcher_strategies.workload_stabilization]`` section).

The watcher.conf configuration file is also used to set the parameter values
required by the Watcher Prometheus data source. The configuration can be
added under the ``[prometheus_client]`` section, and the available options
are duplicated below from the code, as they are self-documenting:

.. code-block:: python

    cfg.StrOpt('host',
               help="The hostname or IP address for the prometheus server."),
    cfg.StrOpt('port',
               help="The port number used by the prometheus server."),
    cfg.StrOpt('fqdn_label',
               default="fqdn",
               help="The label that Prometheus uses to store the fqdn of "
                    "exporters. Defaults to 'fqdn'."),
    cfg.StrOpt('instance_uuid_label',
               default="resource",
               help="The label that Prometheus uses to store the uuid of "
                    "OpenStack instances. Defaults to 'resource'."),
    cfg.StrOpt('username',
               help="The basic_auth username to use to authenticate with the "
                    "Prometheus server."),
    cfg.StrOpt('password', secret=True,
               help="The basic_auth password to use to authenticate with the "
                    "Prometheus server."),
    cfg.StrOpt('cafile',
               help="Path to the CA certificate for establishing a TLS "
                    "connection with the Prometheus server."),
    cfg.StrOpt('certfile',
               help="Path to the client certificate for establishing a TLS "
                    "connection with the Prometheus server."),
    cfg.StrOpt('keyfile',
               help="Path to the client key for establishing a TLS "
                    "connection with the Prometheus server."),

The ``host`` and ``port`` are **required** configuration options which have
no set default. These specify the hostname (or IP) and port at which the
Prometheus server is listening. The ``fqdn_label`` allows deployers to
override the required metric label used to match Prometheus node exporters
against the Watcher ComputeNodes in the Watcher decision engine cluster data
model. The default is ``fqdn``, and deployers can specify any other value
(e.g. if they have an equivalent but different label such as ``host``).
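As noted under Limitations, aggregation happens on the Prometheus server
itself. To make that concrete, for a 300 second period the kind of PromQL
expression such a setup boils down to is sketched below; this is an
illustration of the approach, not an excerpt from Watcher's source, and the
metric name and label value are placeholders:

.. code-block::

    avg_over_time(node_memory_MemFree_bytes{fqdn="testbox.controlplane.domain"}[300s])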
A sample watcher.conf configured to use the Prometheus server at
``10.2.3.4:9090`` would look like the following:

.. code-block::

    [watcher_datasources]

    datasources = prometheus

    [prometheus_client]

    host = 10.2.3.4
    port = 9090
    fqdn_label = fqdn

python_watcher-14.0.0/doc/source/glossary.rst

.. Except where otherwise noted, this document is licensed under
   Creative Commons Attribution 3.0 License. You can view the license at:
   https://creativecommons.org/licenses/by/3.0/

========
Glossary
========

.. _glossary:

This page explains the different terms used in the Watcher system. They are
sorted in alphabetical order.

.. _action_definition:

Action
======

.. watcher-term:: watcher.api.controllers.v1.action

.. _action_plan_definition:

Action Plan
===========

.. watcher-term:: watcher.api.controllers.v1.action_plan

.. _administrator_definition:

Administrator
=============

The :ref:`Administrator <administrator_definition>` is any user who has admin
access on the OpenStack cluster. This user is allowed to create new projects
for tenants, create new users and assign roles to each user.

The :ref:`Administrator <administrator_definition>` usually has remote access
to any host of the cluster in order to change the configuration and restart
any OpenStack service, including Watcher.

In the context of Watcher, the :ref:`Administrator
<administrator_definition>` is a role for users which allows them to run any
Watcher commands, such as:

- Create/Delete an :ref:`Audit Template <audit_template_definition>`
- Launch an :ref:`Audit <audit_definition>`
- Get the :ref:`Action Plan <action_plan_definition>`
- Launch a recommended :ref:`Action Plan <action_plan_definition>` manually
- Archive previous :ref:`Audits <audit_definition>` and :ref:`Action Plans
  <action_plan_definition>`

The :ref:`Administrator <administrator_definition>` is also allowed to modify
any Watcher configuration files and to restart Watcher services.

.. _audit_definition:

Audit
=====

.. watcher-term:: watcher.api.controllers.v1.audit

.. _audit_scope_definition:

Audit Scope
===========

An Audit Scope is a set of audited resources. An Audit Scope should be
defined in each Audit Template (which contains the Audit settings).

.. _audit_template_definition:

Audit Template
==============

.. watcher-term:: watcher.api.controllers.v1.audit_template

.. _availability_zone_definition:

Availability Zone
=================

Please read the official OpenStack definition of an Availability Zone.

.. _cluster_definition:

Cluster
=======

A :ref:`Cluster <cluster_definition>` is a set of physical machines which
provide compute, storage and networking resources and are managed by the same
OpenStack Controller node. A :ref:`Cluster <cluster_definition>` represents a
set of resources that a cloud provider is able to offer to his/her
:ref:`customers <customer_definition>`.

A data center may contain several clusters. The :ref:`Cluster
<cluster_definition>` may be divided in one or several :ref:`Availability
Zone(s) <availability_zone_definition>`.

.. _cluster_data_model_definition:

Cluster Data Model (CDM)
========================

.. watcher-term:: watcher.decision_engine.model.collector.base

.. _controller_node_definition:

Controller Node
===============

Please read the official OpenStack definition of a Controller Node. In many
configurations, Watcher will reside on a controller node even if it can
potentially be hosted on a dedicated machine.

.. _compute_node_definition:

Compute node
============

Please read the official OpenStack definition of a Compute Node.
.. _customer_definition:

Customer
========

A :ref:`Customer <customer_definition>` is the person or company which
subscribes to the cloud provider offering. A customer may have several
:ref:`Project(s) <project_definition>` hosted on the same :ref:`Cluster
<cluster_definition>` or dispatched on different clusters.

In the private cloud context, the :ref:`Customers <customer_definition>` are
different groups within the same organization (different departments, project
teams, branch offices and so on). Cloud infrastructure includes the ability
to precisely track each customer's service usage so that it can be charged
back to them, or at least reported to them.

.. _goal_definition:

Goal
====

.. watcher-term:: watcher.api.controllers.v1.goal

.. _host_aggregates_definition:

Host Aggregate
==============

Please read the official OpenStack definition of a Host Aggregate.

.. _instance_definition:

Instance
========

A running virtual machine, or a virtual machine in a known state such as
suspended, that can be used like a hardware server.

.. _managed_resource_definition:

Managed resource
================

A :ref:`Managed resource <managed_resource_definition>` is one instance of a
:ref:`Managed resource type <managed_resource_type_definition>` in a topology
with particular properties and dependencies on other :ref:`Managed resources
<managed_resource_definition>` (relationships).

For example, a :ref:`Managed resource <managed_resource_definition>` can be
one virtual machine (i.e., an :ref:`instance <instance_definition>`) hosted
on a :ref:`compute node <compute_node_definition>` and connected to another
virtual machine through a network link (represented also as a :ref:`Managed
resource <managed_resource_definition>` in the :ref:`Cluster Data Model
<cluster_data_model_definition>`).

.. _managed_resource_type_definition:

Managed resource type
=====================

A :ref:`Managed resource type <managed_resource_type_definition>` is a type
of hardware or software element of the :ref:`Cluster <cluster_definition>`
that the Watcher system can act on.

Here are some examples of :ref:`Managed resource types
<managed_resource_type_definition>`:

- Nova Host Aggregates
- Nova Servers
- Cinder Volumes
- Neutron Routers
- Neutron Networks
- Neutron load-balancers
- Sahara Hadoop Cluster
- ...

It can be any of the official list of available resource types defined in
OpenStack for HEAT.

.. _efficacy_indicator_definition:

Efficacy Indicator
==================

.. watcher-term:: watcher.api.controllers.v1.efficacy_indicator

.. _efficacy_specification_definition:

Efficacy Specification
======================

.. watcher-term:: watcher.decision_engine.goal.efficacy.base

.. _efficacy_definition:

Optimization Efficacy
=====================

The :ref:`Optimization Efficacy <efficacy_definition>` is the objective
measure of how much of the :ref:`Goal <goal_definition>` has been achieved
with respect to constraints and :ref:`SLAs <sla_definition>` defined by the
:ref:`Customer <customer_definition>`.

The way efficacy is evaluated will depend on the :ref:`Goal
<goal_definition>` to achieve. Of course, the efficacy will be relevant only
as long as the :ref:`Action Plan <action_plan_definition>` is relevant (i.e.,
the current state of the :ref:`Cluster <cluster_definition>` has not changed
in a way that a new :ref:`Audit <audit_definition>` would need to be
launched).

For example, if the :ref:`Goal <goal_definition>` is to lower the energy
consumption, the :ref:`Efficacy <efficacy_definition>` will be computed using
several :ref:`efficacy indicators <efficacy_indicator_definition>` (KPIs):

- the percentage of energy gain (which must be the highest possible)
- the number of :ref:`SLA violations <sla_violation_definition>` (which must
  be the lowest possible)
- the number of virtual machine migrations (which must be the lowest
  possible)

All those indicators are computed within a given timeframe, which is the time
taken to execute the whole :ref:`Action Plan <action_plan_definition>`.

The efficacy also enables the :ref:`Administrator
<administrator_definition>` to objectively compare different :ref:`Strategies
<strategy_definition>` for the same goal and same workload of the
:ref:`Cluster <cluster_definition>`.
.. _project_definition:

Project
=======

:ref:`Projects <project_definition>` represent the base unit of "ownership"
in OpenStack, in that all :ref:`resources <managed_resource_definition>` in
OpenStack should be owned by a specific :ref:`project <project_definition>`.
In OpenStack Identity, a :ref:`project <project_definition>` must be owned by
a specific domain.

Please read the official OpenStack definition of a Project.

.. _scoring_engine_definition:

Scoring Engine
==============

.. watcher-term:: watcher.api.controllers.v1.scoring_engine

.. _sla_definition:

SLA
===

:ref:`SLA <sla_definition>` means Service Level Agreement.

The resources are negotiated between the :ref:`Customer
<customer_definition>` and the Cloud Provider in a contract.

Most of the time, this contract is composed of two documents:

- :ref:`SLA <sla_definition>`: Service Level Agreement
- :ref:`SLO <slo_definition>`: Service Level Objectives

Note that the :ref:`SLA <sla_definition>` is more general than the :ref:`SLO
<slo_definition>`, in the sense that the former specifies what service is to
be provided, how it is supported, times, locations, costs, performance, and
responsibilities of the parties involved, while the :ref:`SLO
<slo_definition>` focuses on more measurable characteristics such as
availability, throughput, frequency, response time or quality.

You can also read the Wikipedia page for SLA, which provides a good
definition.

.. _sla_violation_definition:

SLA violation
=============

An :ref:`SLA violation <sla_violation_definition>` happens when an :ref:`SLA
<sla_definition>` defined with a given :ref:`Customer <customer_definition>`
could not be respected by the cloud provider within the timeframe defined by
the official contract document.

.. _slo_definition:

SLO
===

A Service Level Objective (SLO) is a key element of an :ref:`SLA
<sla_definition>` between a service provider and a :ref:`Customer
<customer_definition>`. SLOs are agreed as a means of measuring the
performance of the Service Provider and are outlined as a way of avoiding
disputes between the two parties based on misunderstanding.

You can also read the Wikipedia page for SLO, which provides a good
definition.

.. _solution_definition:

Solution
========

.. watcher-term:: watcher.decision_engine.solution.base

.. _strategy_definition:

Strategy
========

.. watcher-term:: watcher.api.controllers.v1.strategy

.. _watcher_applier_definition:

Watcher Applier
===============

.. watcher-term:: watcher.applier.base

.. _watcher_database_definition:

Watcher Database
================

This database stores all the Watcher domain objects which can be requested by
the Watcher API or the Watcher CLI:

- Audit templates
- Audits
- Action plans
- Actions
- Goals

The Watcher domain being here "*optimization of some resources provided by an
OpenStack system*".

See :doc:`architecture` for more details on this component.

.. _watcher_decision_engine_definition:

Watcher Decision Engine
=======================

.. watcher-term:: watcher.decision_engine.manager

.. _watcher_planner_definition:

Watcher Planner
===============
.. watcher-term:: watcher.decision_engine.planner.base

python_watcher-14.0.0/doc/source/image_src/dia/architecture.dia

[binary Dia diagram source omitted]
python_watcher-14.0.0/doc/source/image_src/dia/functional_data_model.dia

[binary Dia diagram source omitted]

python_watcher-14.0.0/doc/source/image_src/plantuml/action_plan_state_machine.txt

@startuml

[*] --> RECOMMENDED: The Watcher Planner\ncreates the Action Plan
RECOMMENDED --> PENDING: Administrator launches\nthe Action Plan
PENDING --> ONGOING: The Watcher Applier receives the request\nto launch the Action Plan
ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Watcher Applier
ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully
FAILED --> DELETED : Administrator removes\nAction Plan
SUCCEEDED --> DELETED : Administrator removes\nAction Plan
ONGOING --> CANCELLING : Administrator cancels\nAction Plan
CANCELLING --> CANCELLED : The Watcher Applier cancelled\nthe Action Plan successfully
CANCELLING --> FAILED : Something failed while cancelling\nthe Action Plan in the Watcher Applier
RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan
RECOMMENDED --> SUPERSEDED : The Watcher Decision Engine supersedes\nAction Plan
PENDING --> CANCELLED : Administrator cancels\nAction Plan
CANCELLED --> DELETED
SUPERSEDED --> DELETED
DELETED --> [*]

@enduml

python_watcher-14.0.0/doc/source/image_src/plantuml/audit_state_machine.txt

@startuml

[*] --> PENDING: Audit requested by Administrator
PENDING --> ONGOING: Audit request is received\nby the Watcher Decision Engine
ONGOING --> FAILED: Audit fails\n(Exception occurred)
ONGOING --> SUCCEEDED: The Watcher Decision Engine\ncould find at least one Solution
ONGOING --> SUSPENDED: Administrator wants to\nsuspend the Audit
SUSPENDED --> ONGOING: Administrator wants to\nresume the Audit
FAILED --> DELETED : Administrator wants to\narchive/delete the Audit
SUCCEEDED --> DELETED : Administrator wants to\narchive/delete the Audit
PENDING --> CANCELLED : Administrator cancels\nthe Audit
ONGOING --> CANCELLED : Administrator cancels\nthe Audit
CANCELLED --> DELETED : Administrator wants to\narchive/delete the Audit
SUSPENDED --> DELETED: Administrator wants to\narchive/delete the Audit
DELETED --> [*]

@enduml

python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt

@startuml
skinparam maxMessageSize 100

actor "Administrator"

== Initialization ==

"Administrator" -> "Decision Engine" : Start all services
"Decision Engine" -> "Background Task Scheduler" : Start
activate "Background Task Scheduler"
"Background Task Scheduler" -> "Cluster Model Collector Loader"\
: List available cluster data models
"Cluster Model Collector Loader" --> "Background Task Scheduler"\
: list of BaseClusterModelCollector instances
loop for every available cluster data model collector
Task Scheduler" -> "Background Task Scheduler"\ : add periodic synchronization job create "Jobs Pool" "Background Task Scheduler" -> "Jobs Pool" : Create sync job end deactivate "Background Task Scheduler" hnote over "Background Task Scheduler" : Idle == Job workflow == "Background Task Scheduler" -> "Jobs Pool" : Trigger synchronization job "Jobs Pool" -> "Nova Cluster Data Model Collector" : synchronize activate "Nova Cluster Data Model Collector" "Nova Cluster Data Model Collector" -> "Nova API"\ : Fetch needed data to build the cluster data model "Nova API" --> "Nova Cluster Data Model Collector" : Needed data "Nova Cluster Data Model Collector" -> "Nova Cluster Data Model Collector"\ : Build an in-memory cluster data model ]o<-- "Nova Cluster Data Model Collector" : Done deactivate "Nova Cluster Data Model Collector" @enduml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt0000664000175000017500000000122400000000000031744 0ustar00zuulzuul00000000000000@startuml actor Administrator Administrator -> "Watcher CLI" : watcher audit create -a "Watcher CLI" -> "Watcher API" : POST audit(parameters) "Watcher API" -> "Watcher Database" : create new audit in database (status=PENDING) "Watcher API" <-- "Watcher Database" : new audit uuid "Watcher CLI" <-- "Watcher API" : return new audit URL Administrator <-- "Watcher CLI" : new audit uuid "Watcher API" -> "AMQP Bus" : trigger_audit(new_audit.uuid) "AMQP Bus" -> "Watcher Decision Engine" : trigger_audit(new_audit.uuid) (status=ONGOING) ref over "Watcher Decision Engine" Trigger audit in the Watcher Decision Engine end ref @enduml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_create_audit_template.txt0000664000175000017500000000135700000000000031472 0ustar00zuulzuul00000000000000@startuml actor Administrator Administrator -> "Watcher CLI" : watcher audittemplate create \ [--strategy-uuid ] "Watcher CLI" -> "Watcher API" : POST audit_template(parameters) "Watcher API" -> "Watcher Database" : Request if goal exists in database "Watcher API" <-- "Watcher Database" : OK "Watcher API" -> "Watcher Database" : Request if strategy exists in database (if provided) "Watcher API" <-- "Watcher Database" : OK "Watcher API" -> "Watcher Database" : Create new audit_template in database "Watcher API" <-- "Watcher Database" : New audit template UUID "Watcher CLI" <-- "Watcher API" : Return new audit template URL in HTTP Location Header Administrator <-- "Watcher CLI" : New audit template UUID @enduml ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt 22 mtime=1743591576.0 python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_crea0000664000175000017500000000312600000000000034444 0ustar00zuulzuul00000000000000@startuml skinparam maxMessageSize 200 "Decision Engine" -> "Decision Engine" : Execute audit activate "Decision Engine" "Decision Engine" -> "Decision Engine" : Set the audit state to ONGOING "Decision Engine" -> "Strategy selector" : Select strategy activate "Strategy selector" alt A specific strategy is provided "Strategy selector" -> "Strategy selector" : Load strategy and inject the \ cluster data 
cluster data model
else Only a goal is specified
"Strategy selector" -> "Strategy selector" : select strategy
"Strategy selector" -> "Strategy selector" : Load strategy and inject the \
cluster data model
end
"Strategy selector" -> "Decision Engine" : Return loaded Strategy
deactivate "Strategy selector"
"Decision Engine" -> "Strategy" : Execute the strategy
activate "Strategy"
"Strategy" -> "Strategy" : **pre_execute()** Checks if the strategy \
pre-requisites are all set.
"Strategy" -> "Strategy" : **do_execute()** Contains the logic of the strategy
"Strategy" -> "Strategy" : **post_execute()** Set the efficacy indicators
"Strategy" -> "Strategy" : Compute the global efficacy of the solution \
based on the provided efficacy indicators
"Strategy" -> "Decision Engine" : Return the solution
deactivate "Strategy"
"Decision Engine" -> "Planner" : Plan the solution that was computed by the \
strategy
activate "Planner"
"Planner" -> "Planner" : Store the planned solution as an action plan with its \
related actions and efficacy indicators
"Planner" --> "Decision Engine" : Done
deactivate "Planner"
"Decision Engine" -> "Decision Engine" : Update the audit state to SUCCEEDED
deactivate "Decision Engine"

@enduml

python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_launch_action_plan.txt

@startuml

actor Administrator
Administrator -> "Watcher CLI" : watcher actionplan start <action_plan_uuid>
"Watcher CLI" -> "Watcher API" : PATCH action_plan(state=PENDING)
"Watcher API" -> "Watcher Database" : action_plan.state=PENDING
"Watcher CLI" <-- "Watcher API" : HTTP 200
Administrator <-- "Watcher CLI" : OK
"Watcher API" -> "AMQP Bus" : launch_action_plan(action_plan.uuid)
"AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid)

ref over "Watcher Applier"
Launch Action Plan in the Watcher Applier
end ref

@enduml

python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt

@startuml

"AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid)
"Watcher Applier" -> "Watcher Database" : action_plan.state=ONGOING
"Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = ONGOING
"Watcher Applier" -> "Watcher Database" : get_action_list(action_plan.uuid)
"Watcher Applier" <-- "Watcher Database" : actions
loop for each action of the action flow
create Action
"Watcher Applier" -> Action : instantiate Action object with target resource id\n and input parameters
"Watcher Applier" -> Action : validate_parameters()
"Watcher Applier" <-- Action : OK
"Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = ONGOING
"Watcher Applier" -> Action : preconditions()
"Watcher Applier" <-- Action : OK
"Watcher Applier" -> Action : execute()
alt action is "migrate instance"
Action -> "Nova API" : migrate(instance_id, dest_host_id)
Action <-- "Nova API" : OK
else action is "disable hypervisor"
Action -> "Nova API" : host-update(host_id, maintenance=true)
Action <-- "Nova API" : OK
end
"Watcher Applier" <-- Action : OK
"Watcher Applier" -> "Watcher Database" : action.state=SUCCEEDED
"Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = SUCCEEDED
end
"Watcher Applier" -> "Watcher Database" : action_plan.state=SUCCEEDED
"Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = SUCCEEDED

@enduml

python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt

@startuml

actor Administrator

== Create some Audit settings ==

Administrator -> Watcher : create new Audit Template (i.e. Audit settings: goal, scope, ...)
Watcher -> Watcher : save Audit Template in database
Administrator <-- Watcher : Audit Template UUID

== Launch a new Audit ==

Administrator -> Watcher : launch new Audit of the OpenStack infrastructure resources\nwith a previously created Audit Template
Administrator <-- Watcher : Audit UUID
Administrator -> Watcher : get the Audit state
Administrator <-- Watcher : ONGOING
Watcher -> Watcher : compute a solution to achieve optimization goal
Administrator -> Watcher : get the Audit state
Administrator <-- Watcher : SUCCEEDED

== Get the result of the Audit ==

Administrator -> Watcher : get Action Plan
Administrator <-- Watcher : recommended Action Plan and estimated efficacy
Administrator -> Administrator : verify the recommended actions\nand evaluate the estimated gain vs aggressiveness of the solution

== Launch the recommended Action Plan ==

Administrator -> Watcher : launch the Action Plan
Administrator <-- Watcher : Action Plan has been launched
Watcher -> Watcher : trigger Actions on OpenStack services
Administrator -> Watcher : get the Action Plan state
Administrator <-- Watcher : ONGOING
Administrator -> Watcher : get the Action Plan state
Administrator <-- Watcher : SUCCEEDED

@enduml

python_watcher-14.0.0/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt

@startuml
skinparam maxMessageSize 100

"AMQP Bus" -> "Decision Engine" : trigger audit
activate "Decision Engine"
"Decision Engine" -> "Database" : update audit.state = ONGOING
"AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = ONGOING
"Decision Engine" -> "Database" : get audit parameters (goal, strategy, ...)
"Decision Engine" <-- "Database" : audit parameters (goal, strategy, ...)
"Decision Engine" --> "Decision Engine"\ : select appropriate optimization strategy (via the Strategy Selector) create Strategy "Decision Engine" -> "Strategy" : execute strategy activate "Strategy" "Strategy" -> "Cluster Data Model Collector" : get cluster data model "Cluster Data Model Collector" --> "Strategy"\ : copy of the in-memory cluster data model loop while enough history data for the strategy "Strategy" -> "Ceilometer API" : get necessary metrics "Strategy" <-- "Ceilometer API" : aggregated metrics end "Strategy" -> "Strategy"\ : compute/set needed actions for the solution so it achieves its goal "Strategy" -> "Strategy" : compute/set efficacy indicators for the solution "Strategy" -> "Strategy" : compute/set the solution global efficacy "Decision Engine" <-- "Strategy"\ : solution (unordered actions, efficacy indicators and global efficacy) deactivate "Strategy" create "Planner" "Decision Engine" -> "Planner" : load actions scheduler "Planner" --> "Decision Engine" : planner plugin "Decision Engine" -> "Planner" : schedule actions activate "Planner" "Planner" -> "Planner"\ : schedule actions according to scheduling rules/policies "Decision Engine" <-- "Planner" : new action plan deactivate "Planner" "Decision Engine" -> "Database" : save new action plan in database "Decision Engine" -> "Database" : update audit.state = SUCCEEDED "AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = SUCCEEDED deactivate "Decision Engine" hnote over "Decision Engine" : Idle @enduml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt0000664000175000017500000000672100000000000030364 0ustar00zuulzuul00000000000000@startuml !define table(x) class x << (T,#FFAAAA) >> !define primary_key(x) x !define foreign_key(x) x hide methods hide stereotypes table(goals) { primary_key(id: Integer) uuid : String[36] name : String[63] display_name : String[63] efficacy_specification : JSONEncodedList, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(strategies) { primary_key(id: Integer) foreign_key(goal_id : Integer) uuid : String[36] name : String[63] display_name : String[63] parameters_spec : JSONEncodedDict, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(audit_templates) { primary_key(id: Integer) foreign_key("goal_id : Integer") foreign_key("strategy_id : Integer, nullable") uuid : String[36] name : String[63], nullable description : String[255], nullable scope : JSONEncodedList created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(audits) { primary_key(id: Integer) foreign_key("goal_id : Integer") foreign_key("strategy_id : Integer, nullable") uuid : String[36] audit_type : String[20] state : String[20], nullable interval : Integer, nullable parameters : JSONEncodedDict, nullable scope : JSONEncodedList, nullable auto_trigger: Boolean created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(action_plans) { primary_key(id: Integer) foreign_key("audit_id : Integer, nullable") foreign_key("strategy_id : Integer") uuid : String[36] state : String[20], nullable global_efficacy : JSONEncodedList, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(actions) { primary_key(id: Integer) foreign_key("action_plan_id : Integer") uuid : String[36] 
  action_type : String[255]
  input_parameters : JSONEncodedDict, nullable
  state : String[20], nullable
  parents : JSONEncodedList, nullable
  created_at : DateTime
  updated_at : DateTime
  deleted_at : DateTime
  deleted : Integer
}

table(efficacy_indicators) {
  primary_key(id: Integer)
  foreign_key("action_plan_id : Integer")
  uuid : String[36]
  name : String[63]
  description : String[255], nullable
  unit : String[63], nullable
  value : Numeric
  created_at : DateTime
  updated_at : DateTime
  deleted_at : DateTime
  deleted : Integer
}

table(scoring_engines) {
  primary_key(id: Integer)
  uuid : String[36]
  name : String[63]
  description : String[255], nullable
  metainfo : Text, nullable
  created_at : DateTime
  updated_at : DateTime
  deleted_at : DateTime
  deleted : Integer
}

table(service) {
  primary_key(id: Integer)
  name: String[255]
  host: String[255]
  last_seen_up: DateTime
  created_at : DateTime
  updated_at : DateTime
  deleted_at : DateTime
  deleted : Integer
}

"goals" <.. "strategies" : Foreign Key
"goals" <.. "audit_templates" : Foreign Key
"strategies" <.. "audit_templates" : Foreign Key
"goals" <.. "audits" : Foreign Key
"strategies" <.. "audits" : Foreign Key
"action_plans" <.. "actions" : Foreign Key
"action_plans" <.. "efficacy_indicators" : Foreign Key
"strategies" <.. "action_plans" : Foreign Key
"audits" <.. "action_plans" : Foreign Key

@enduml

python_watcher-14.0.0/doc/source/images/action_plan_state_machine.png

[binary PNG image data omitted]
ۦLk ]+=obM#Nό(WUZ!5yd(o5͹8$N 393 ۠/dwJdޚt&&l+^{KBX:݄VnV~N7kȶznGc8X {{gK#.- QoS=г-lmm3(_[wﶴL~g---ΗJ6_Ŏ; 5SLy3")/?Rz~/v^xѣGVb]iLmBW6lC8j_twwԋݱcٳstt}Ç=<<6n(smΉ'(Fi]v988}||Tܹsnݪ8o*ëL}e//u֩rUsI`[[[gOBi???OFZYYѧwJrUJAnnn*Un(}^K%OO8XtzP&'5]v⽐2gʋ%e;99_}@ TB?v׍xl6d6t.vSfT_=UUULɓ'ÇgҶ7odǏ﹪1c§eԩ/,LBU)JA^().)WZȣ:5'eJ,:=X\>jԨɓ''&&*\yP^\.q+V W'~8L[wvm6kʏGt.ƫ]']>:'۷"M+XOkðW=Vfamdoen]jUPPk&TNIAf)1yV\$`SBmG1q֮jhW񬬬,&\UU%]9Sql6'sdngV2mڴM6؈GPζv-vhBkKKmm60*Dz:k}9~[ u񋋋{'''Z:׮]&K U{J888K>~)o[l?:o<-9H$,YDw2߿ԨQO4tĈ̍wJ*2KAՌɋ @rvvfͦkm?Kq(˗_~"@9{;v455ݾ}{…/e3VdΔ̭S 8~z--y]tԜ8k˫v!mm60BRkté}ڍ 6lشiӸ\zf̘!QSL\\̙3vyfkkk `ś 6l`XVRCBB”)S 2n8#ɶ3 |>%_-o T͘+Qiӟ|k4sF;z+fffzZZ3ΎÊk̙b+ss2gJV^~diTTɓkiKBX8#tG[d@j C!ۀriN(3{ܝқ=N^i($^dd_Z׏$M]<m_-,: gV{* oO@AgzޤZ\a vW) Q4W:::iӦ\+aU-t `0@eJv!8S[Xl6d0h{ 8lJ3IE JZ{mllrss^C .FBTtW." Q;e6w Y)&b넶Vx z8]SvlκġG̹O$G4dl >M]gVo>ds6aio. G"] t_BvMqdldS=̋91lB$q֮ں.!@ o)~ q=lUtm#ۭcf{l9:5sbyl1y>ں< @=wANI_8t*dlEۿcUkhڐm @+w7۸!UCx]%!,A܉Hv؆ O[,qiyx Ę6 ;uTLLLFFGm3qxh E 뚺Tlm-c;?{Elt/La "##IO>p*++QLǮp%r=L4t'/h+ :BXU. zĜx6mÖm8,5>hiySËnzk;emlmR|@f4Yjn nz2hLMeG"8fIc/-Í@Dr;nY€ښs֒}QsvDD=䥥L6j^rE$Аlm\Zʛ&J @GE"QeeeNBj̛+>g{ X6U cﺠ=}oL;J;w.??HDmti]@+;y"$"W['1Xddim>N~.4Qw<ٻBM[|9ڀICgMDͩ/ZT--DpH,>e/MS((   VnCi0u$KMPRDQjү;yq\JBX ȶ6 YYY$?5S) BDiy痤X@:{ԫ9\mNsfنlK+@ zFs;ۥPP@(,l dK=tA-P Zf^@!?wu#97"|!m͉JBXP<+IÛlC D-8^'5\+U·lS ث= 6`r=OhU@hvUFjJoglC *;I> 2De6+xiS3 ېm#$ [V.O*j [:c+y гvU6AZ"N_lC jۭA=JyWD\+PZ '30R?>@qvg[;oB0g7um޷ rȚ+~WEtZz Q:!&@Gp-.nT6}Wq֮eG" xB'ªڢǸV.JFa3 $d1g3zN%bo6zK ɔMd״\Zx&mܨ7@oiI^،hic1t=4ugs%_6dhߧ3hz͠(dEtu:}nWoI_/&#N1Y&-ƚ r!k әC}wz2 s.(<%Sׇ>J~0_t FF>.ifThGY'lȶR67Uw+c?Scro۸eeG_Ssȷh@V.6K[c󝈤t'(3{Ki6d@U~v]/ԹNrxQ)AV.1wh>3t6{,tD Sf N|fPIݪ@砪lm#گ/ avq & jgj.';}?K^YNye}މfǬďs>AHT8+xJJjHP)eJͺ<` uw" ) aUYkd=%$5 y;>Qfo12"&q@ O#aK]w3i$ھ/9GYgZ{=w7i1M v@d[6[ %uRgfg']+xlV"Ɲ$jB/--~L*9XwNwtBZ0]_d@e)jlƨ3xK}zc3C>o7W1iCkg;A6[e?0ce-=bs8cmh iLӝ*傮ǧzq/d4Rדण'd{lV T.2QǟTԔ=< } ?@zE%]N|yY̯3f@ϏԘhRƛ;}y﮼&1AfAa/G9o&/;մSQf\7Ÿ@dlk~3~r=ۼ+~6[ˮ.Dkri.3V^͛Pږ>m붠ߺZ.r{EtA_xF+g]=n ]V %}+s~p.-,ܶ74VM[G\ߞ=؎;]@Tx3ڙ׋"AkI9͡uQfo^p]mK7b VS͇ ySG龩xJ1GoU=Ky1'ekG;)m/RKĿ2#jSWPװ<}ZgcӰ1*q]Is=R dl7sF;.,;Dz|2?\W_XŊ h\1#K=lLy2}I}<?xQqYK .y;F]2u6n\+[ؓ5dV+Z1oQ_Zǵr 6:!6dh);z6 h߮zZ^T O} J"PB6~ nn_wt\Q<>9{6KD6n֜voyT˞Z9)~7#Ǹʴ03gfGj?2+]7bUv]R~2cw9-PrylC ζDe@YŽ@}M4}Ƿ$۽nK$|nn)C]"##srr|~CC >;C݋[W6Foo96*8r~V^XTMÇ ߸Vw"PZefj҈ę}֎?NUCiQf=r=D$ldmT%goSUh aU-5P[~={:%^}u2(w5vڑpuܹJ\NOwcFeu6U)QvG.sq@~ s ~AZCjLUjhO֐Y 1 ,,`MZ=bN5Ի(6y`~S[L߽j˶%;u!]}KӦMR.9tԩR\@4\/?7C#/{r4~R#k:, ٴv{Cj^ihwOW;͑ؓbeJ>xlκ/iF8@dG3ڙN+۽.{KjүPWUe>Ғ3u$2kk_|q,75e >LA+8a#˜0A"Wx2?ě$EۏU%gUgڼ ^[_6+Tx}̶MwOlTUJq㭊p*++Q&}?+Ӽ(beZxÖ[%+۽6b2۱gGw ;ɃJNDR1掌]SWwQ$GE@d[̚<.+E0aBhhĉ) نl.]6GIem NL>"@{+дzm6bw̬?ڝU;QmL- aL D&-v lȶv.Zx J̜93!!Ag M3#pƽw'"LMq̰֮dۂ llȶ)= S.xwN8A EJo۶޽{sΕw}l0 [payy99szʶ;ϧv1{l^xhѻw]VSnnn⻅_}մGyyy[N淾 څnhn*sm 6o>GG۷o {xxlܸQ ̀g``x_mɌO_vpG#50\[Zr**Ξ=׷@0Dnn8uZh:>HL<ȼKǮ VۗR#W_|5h/c1z׍WUD=*,Im9m6nU:Z?1cB!/I^zׯ3BIy?/| [UU%w=e[/bhho)Ν;2wZ&N( GM#[[ۛ7o2ǫqƸqںʷƆ,ZdƧ/e'%L*))=R<q._ut\Qdm<޶6Cy_]|[fjoyW$g[JM731nGu@dON!ࢾR}M611x<+by y\>jԨɓ''&&VggyYXX0y4hoI+sm 6D,ypqqaX>Cےl`Iʶ.` }k?:k@djgOW\ʖ\ ݒWt' <ʊWij)o$ZuuuWԷ\ٖ[)S|K sf͚E ;;<ے6dȶ1cWEniԈ91#Gd]C@dh̵r.j\]]fiqrrҝl/_d7 3XV99H$*//_dSomٲEyU퐐Zwڵ>@ +|7iے6dȶ1cEUJ_O{\@dONKB#̘1mqqq3g|=hڕ~aʔ)fffzZZ3s߾}V@_2dȸqBCCşJ}K(nذ ;펎ZaæMrU ǎ8pszݖ@!@{RQsmSqJنl@d[kq֮7N*v ېmSDYryRQP6:yA;mŁ'Si. NAOi~@d[d,:y~LU@'4/h@hkl1qlz¹ͳ`4h+GoT ۘЎL\@mmLجd.e|۷o/tOgrƌR3fΜI͛7[[[[XX3۷ܼ 60SB|KVd;!!aʔ)C 7n\hh(dTdȣVE6rN_Pc ߋ?I:Tмtv `df\p݈8@椩+nE @BO0NjٱohGX ,|$,Zɩ($K=v `46~Uq֮w" IE T ZmgW'F'=a`JNٟoa C;0&x?*,C &A >ʥ*9b8`ԋK<ٖN? 
[fnE+9}Ў< 69 98;Z6*e>Wף ^Dۅhe& Av>G;0nE\ڋ8@M{ h+ SsX?}p;5:tl>̋K:lYߢ im2 b+ @8ƥ9@;f-gY9#@'q\#xPES<'c#@XU.lA( ۦvI zm5O R;`Z]Cu_F;0:{!&6vYPkh77>9]C u_fXaaaN(Y6 m\Z^T+ϛF;0:ڹV. 7ЂAMj>cog3 nSK_}INӧ9NVVVee%J~.ږW>ΆZKMW 2m5_@;0R2x#m𴭱Rt'/J L?d'^Lxe6#X|9Ijbbb~~@ @ŲٝML-/f~kpǷV1 T."mEg[{g`ʌªZDdihh(q3~㾸~z?g;rw{^XГ%3g/ pcD}(w}ux6dHSX2<\aD߲&GeONct;& ((aJ7[>.C \q[M0҄sIMkSm0!bh$x+[9/‰V#Q`4!G*`+ஸr1'KAp"] 4Nw.ay}3̟'$$|fflllVvN xq_`?^ccW81%FGehxUbϾI;*1vm;=@FGq[u>jll4LF1--mgmk̤2^w5k;P?t!tYlM6 |٦ꖞԓgtHȩة-# YS|!郚-Keee~~~jj:Yܸ֨hzqfɪ^|CTPQVsӒHïnrlAW/QkE>z4#~UfUӃصKHIAQ;M)Of) 6$4d}t ⴴ4inذ![ ĺV{vfN ҏ2U/ȫ;'!v]B_rm=F` K[RֹSgQ}Al4333SRRi &YY+Y7YCYOY[Z"nU]T/_4w%=׫Hl1K( 6nICQ~ 7&(777--MJ[p-u5Pli l1oo>9c`GZ.JjEQq$f9gY|MčfRiKd}ddd e=em۷P/Iu!6rm׉=VH=4i) 6n)Au>&X, 4LDGJSIڒe?v,"Q-;mzz l\c)T#qrN pҀ=}_`i;?klه) 5x9t:ʁtq_pԶ %K$)AOQ6Ð+m2N6P5klju6q]է,"}9]R1?Bz]`+ Jk=NT[Oi6], Zmd96P@9g:9jpp&#.]PϰGf96X)O@8պxWD) pMҚ\DQ]v23;qwΙNP@7imXmdGrL4c@8EAFj4Z`*Zc,r&e( pRg{  -59KCQE% :W-8 {OcY6zԩ2)32 LWiܽ KG<]REi&$mH&lwBеHܱ )Tp|MEA؆niӸB[FMn%XZ`jCҧM؆IΑvXaJ-ͨiM[`e:W ;?s.m.ْ[$rso0FM~ׄ 8T#ft lJܧ2}i5 6às)]QRsaD>8ӺxWI ;¨i/^k+4!U'+ Xn *JQӀsYf4[( pHj]Of) 6XuV$]u :K*h5EQ%m8 ç6 #n(4!Io  b6tsq^"Ĩi#NT긭aHp bt>=WEvr) pH|9gYH]a*( O{mҀ]`48pkP4!妺/q^ҋn`]`48ٙeE RaPIu ) +-U[t>_Y5 Q(;Z3c -IDAT8wP lQ-g/.].FM׏T[C[66ƀpioKQC2%襞/ ZI''ݾç03l_obŊOw=LuT7_K^1|M4Gd|q]u F͖`$fQ._O^zذa2O-:g:!j_HSUSȉK l措çHӰDi{zl4^sѣon}ŋC=#XšceO>||?q^^^||SO=Q^^!̧~+ tX ;;g?*ˬ[NyʺpTT=#+**|MYx7ްX,VssyJfa!:x?wmµ+p)噇~Xߺ|l1uŻrMVp<{20۸ M+c #j),?s'Nw&M~z(,Y2a„ǏԼ+skVUU%i0<<|РAzuupر׆m???$ |/b~&''K=voZNv.I^o} &t_؞1c$ҋ/Jя~<)zE2Vmnn~7v Z/7nܢEn7lGGG;{8p`U[?Wo:-t2ۘ?t-u#8֦J%gP lI *M];jڨQ=-955u__I;00p?pȻ6loڴ駟vrr~nbݺu{offMV3-I[6>pUnP l*Ƚmh5çuiWGnFq]nHۉ l7Is`Swvߨirm( pMb( mP1/QnF8c4qFISmE:WiR [tp>%r :0vi˸68%UR8/ Zh lI̖-u2q(CE>}86Nfs'Γ&е ;J96kdJ&lA&ͲlϹ|JK)A1_?| C ;&Wu:WqJ{uŊ}Gyrخ0t=.-HãnȐ!x7zx7ްX,d/Am|SgmC*jrdGLsV&|ѻQӲFͶ&M~zyaaa^^^UUU_}UppC&LW^Yxu???tܹ?_$''… O 7 U[t>O ؗSeZSCk3 [,y3*7Z|.5?ASSO?rï+^6ly'|Һ@mm2 lWunnn6RÙ<&> `/- "UJNYD"c>M*Ae]jsyFM[`A䤄[ yx;{Fj}VrBU(l.HaUTDçH÷:nkkE=GM5jTZZZ'SSSGt>pss%}C*ct7Iey~!8jJuVBrQUr] l6ĒWl Oq;qT\xnUEèivXh`S[pӔT#%:KTْ%S&&&FMG'n bzz(RkB࿤&9qAFSmT⒅ \tg;0j}im/8 6Ks&Pm5UHZ9:n+rf/UA=tIUYDbWr1v$@@ ~SXXa  1jE.GUk'vGfr9Q l]j8Se;R/i(LUdגDǨi6DWL2Ci@:UPv0l}\,"q(6k,Mu_fKt iU0j0%.އ"Ut9Sg}9gG6Ma6ЦnW!RT9NKe 7莀}\+;f.3 lkGIq[ݨ0HfԴ^a^};yXi2UDB lvM"uɞw> 0bɜVw}Ν8,"PWa+Mc_"GlϹjΏ\Ĩi=C\ w #ܧ%9yx.]W˰6-۬#-xf݇"UtT0jZwZ F#UJ&`8 lР?mTRdaIݸEʨiۆNS]B>y~!\tIUuViHֹ5a-IqZZrp­0HFv x(Ru6%) fKYGA&%>b$m?M@lçDM5Nsݐb $SWZZOUD K"[bUj3 \{"1[`r0Ku- Z)G2HG}TNQe9)F;_SW/8*sn4K$iKIg@钪@&rn\K=#fJoP뫷nt ;rSzC q+%HiDr;|8UPV&I = /6ݍ9չ [mk!\JzK gN MsƙOfKGLr50heuV lݸ]=L=}a;LA>/ b jޓ*1/D k ඝ*(CD,kw7*e]js4[2Q7E}^4@أsrCvRI6tnCJ~8@`TA%͵lϹ?[s R,Q4~HiǷV.\7%e03,kl9F8 , ZY&Pͮ i䈞R~Hi=KC^/m 3g7B0$;؎fKrHťuy(Of${SJۀ#1u JKq]%TKlKyĤ/)slmʴ }WDmM%SnW 8:BR|S$Q*]gnJ*ŋ_A.?7nLKK3&]YrmA6N'[SQ˒Ris7"V-MSCGLr2/|7K1{/]*xU [ [N'$$̢"L6pgZ.3K[Eӻ}xk+ղ,/נ@c,iHOwH "%~KƩ2IǷ%niބgnav:4-lGݰaCJJJnnnee%J6`J [_RjN S$H+cIukBrw$lvJ} ϑ&4X fmC'K1kliDJkRZ򪣞(`iQA>w.v57 ۷WگaS6^6_L^TTdXPoքJ"U>b2wxՒzmJO}M mC %GU1_ⷴ5=ErOT9kw='[nNk|G%+ b 5jr|R|SNeRr74֟6VnFr4OçNWrMRm=v7a{Ϟ_m9l_mH 3M&{;2*,$H6,9 >b)]!Im-zal4R=jے9 w<#6UYw[Z_Yl7ۚ%7o޼sJvlz^ET)VoMZo'Q;^V.f4o l#hmxJ2$}Qy~!Y)ef1k,AW^ /kj2VHۼ֐^'WoCQZ%-+vK]!e+'uKdIY};@̞*(4npe t1?tmUQ#%;Z{a[s}_\xC]|w v5$l)p+ʠbqN ȭgd^VVNnQ6ܭzC1uƀpiKrCZt/m񆢊;˒7ݰaVq%9*(]Urho ϩnJxֹX%i˯TIr`+Z^6l24s'uNrTY/Ywe5>ޖ@ؾqؾp*v 74#Me2Ghu؜b=g[&%6k<+j,"Q*]yOO8m@D§ 0B9aUriRO%-('VmUnSIJʃ("y׶~E ׆ӥGr'k%rcIʗM; VJVp3+{c8-jey%u9b0apK*#0UIRF0;_SIڦoEEV{~4%c %r.ew,\}2LŔs)}qe:Ur69@^E{g,|^AIi6A+n ^|^5=z9FҶ&a뭘2E$*zfe2/Qo>d:$[;`d fՐUR֍e`?9-X~+U@k\V&gAbvw_Lrw+I[ 
[unrecoverable binary data: PNG image payload(s) belonging to preceding doc/source/images archive entries]
[end of an SVG image file (image/svg+xml); recoverable diagram labels: watcher decision engine, watcher db, message bus, watcher applier, nova, glance, ceilometer, monasca, gnocchi, cinder, datasource drivers, model drivers, action drivers, planner drivers, strategy drivers, goal drivers, scoring engine drivers, workflow drivers, watcher api, watcher dashboard, watcher cli, extensions, API call, RPC cast, notification]
python_watcher-14.0.0/doc/source/images/audit_state_machine.png
[binary PNG data: audit state machine diagram, generated by PlantUML (http://plantuml.com); image content not recoverable as text]
[binary PNG data for audit_state_machine.png continues; not recoverable as text]
python_watcher-14.0.0/doc/source/images/sequence_architecture_cdmc_sync.png
[binary PNG data: sequence diagram, generated by PlantUML (http://plantuml.sourceforge.net); image content not recoverable as text]
python_watcher-14.0.0/doc/source/images/sequence_create_and_launch_audit.png
[binary PNG data: sequence diagram, generated by PlantUML (http://plantuml.com); image content not recoverable as text]
python_watcher-14.0.0/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png
[binary PNG data: sequence diagram, generated by PlantUML (http://plantuml.sourceforge.net); image content not recoverable as text]
Hs5gJ֮!.\rmِܽ_JAtbm|g[C"]͋9%rԬ:`.iU*@7wͺozT[q~/==`.%O[FH5vNOZE!=-(ߧh>GJp2| >AH5kMï>;ի)-,,ƍ{vȓj/L 1_{ }F-&nwyԁTR QV/ϙo_U3o֬155ozcz+~)/j1leϦNxk֦t{lY9q>NyֺQ_uָtyu@IN@]X}hۗ]\\Pr$a[ԁTRr$\nSvn{29oG5~g{6r7j;j)B_o޼9***995:Sn[4-hBߛdre(MʔDF.%=)߯if0!ݸw?ֺ+BBBвX=W}}I t0u::>)/WϐQWt,tZdo]0`jI5[lNMM-..foĹz>}_ !{;SLJ մ1f͚n'~zdZUhh~#éFI5Ah:@]P޼ysAAA#F~R =gKWZ?x1A}ѮN5K׆m߾*((`^CVJϸq٣>_[[o?CC Yz>LO9ĉzuY~aÔrrrHHH3fW VRի<ϟ>}Lۃ>+(G6UMMܹs&=EVT}}%Kjeef8I+V8pYXR TSYO|M]wlt}dMa'SLHLՔ ";v|jtD'ٴLi&p"K.uqq9|QQы/4`(,,vUxS>}Zy8qS|xG###%r;wnzUzxxG>Ӊ7nܸz9sZ|D dʕ+d\2o޼;]H֮]|ykmzPʕy~ec3}*Y-ԇ-D2O:ݻ-u T-],?sGΜ9i&77ޥ Vkmm<;Eo`6^|A{}5GE,]kHFU^ ׁTR p9qDwwwɦOJquu=˽K53f̐Q[[+{LyRU^^^k"蚚ӧOO>]ޫ-ZW3iҤŋ1լZIFޮYkq$212y2ǎ{RR jH5Ђ{Sg,c6?)66vܸq}1&Y 5Wd PAjr8fle,bqox/<]iT<(]I|^LjU5T?6ExKpʯEht t EiK]ik)MOv(ȂuJBHyyrd„!%C+$YBR4f$UjoxdE)ń97Pɦ 1|Xzu>}od*7G.6÷}&G~|rt^i&8%nNwT٩P )~JbݤnR=)oȓ_P=\MZ.)~Y%(EIQVu ᒴ%b#ks ?%Twl0uіͲ97o><((hĈO`ke4ڔFrz %y\5찼uN W%(Lg6!PSV)YR ʙ9 Rc)PbU$بI麠MLkJ^/NRg,@SvIԗ*Y$>M.wR=4*61''G++V8pYZV}ggܸq{cv}z!C4h#/\4ý+wr|RÖ^yȤܪQ⟒SR@eU)Sex8y{}egemQHjMJٽrB x/ɴY+ܰRZNY:|E,oxzzt:-|׮]|̙+Wxyy͟?űM2eӦM"wL5K.uqq9|QQы/?M(Ge+'}jr~Uz P=Zn)L*IcT*^Gb>'%bZj~5a.gI֕HFEY9 &V,JU0E)_߿oooIŋÆ k>ӧO?#/_RTO?~\!=KK6j\Q*ܰS9Q 7Mi H͕*;h̩c `Pd(yk}C>yaʖ6`/2TC7MݻwQ-X DzJhQO;V nQyTڮF ;aZi?c޹TcZke l2D~yl^/gFEk3۽ЫWQFt:C-;gsu׍ׯ+ϗ&77yћ׋JRm*X%HOrd3jjL(4Xp5(2QdU0Q*J^=FG^^SM`` Y[[{ر_]oyKUҌLe'rY^|qr˽hIOo%AEoDr0Q\6JK, .F\LT N5z*O +oJ>5lԑj`Ǖ=tZtf4;E)OH\|4ŎtN_c:k}Ɂ=9J5o2TӔjK>MjǾ!5=Ws NG^T*8i"+g_(/)'ׯ./S7$@"(6_, ifI{srIiП '?,q0T3(@|vӢ\|e'=Ӻ.9`t˩ յ2.oy+%vr{T$C&:($# wYdIԿ{?);,Y'?f='+&+7xErNzú,c>V۝0XwWf2}?&|g_){JwAJ-_]wKJvi;y=6iQdbּ( 7tA].ŗ:jVI,QgV-.]I4F}Ki9%LʲP-{d:v˜O"4]3S͑+nTsRMs%壭/ߺkT0L"N>ԁTrzݶʕ|{w49+) dq(KC'?P#[p:A\Ջi^Hs߷?GH5?'ׄ˦8㛹˂So@.g#u ՠ8;TKhxSy*uZɍRe'yFZ3dyQ"S3peJO/"sRM+}:wԐjھe_V$6@SU m==4(-fT^n!?qW˿ny9#̉UYI.ʏuvNmV{P;7~Jh4!!!7oJNNhߙ9Ŗ-4bm{/?+R VMYyI|5jM#iE&Y.a[ .Ӽ*ck3Tڠ1nk,Yw-'D+V݊4NoF֭[%lٲ%:::55 ;~uale"}6#d CH50 v}*CЅjտfP`Y4\bm%^J༘r1{%Q'AqU~eyʡ_#  [;j$Hl]VV 1 rd#9`l5\j`kae|ٿ ä͗Y8+ܰ3uƢݶQVΒg-p>&k-cdTTTd|=ߛ7,xK9.&<$)҄nI?e,5չjOo%/j $ɧ?'T{Diٿ&4)~yae 5 EI22|is5U4%{jly 5<ƾO湨_ny)-߽&ۮ@M„YUr wޥ&Aip 0/R])f 4grL?ڨX{(|LrV6eeetBEHK367z_쿕~I5 bi Ry,Ld7r2u H;XZYR.﫜J΍όr4ykq/w6kyz΁.CiIsl%':W5c&MQqB'[y+.V5r7t$A]ȣ&c7H˛+|tI2Mt)Q)OZ*_vz 0I$Uc}ńtzG+-cyȁ&Ԕd&tA%Y&΄AoW~&ڍ`cld?A)~\Fcu ە(K&?[SY.]Zᆝ>4'~܋=#X䲥5B յM?sH#Qփ:j2i {}r'flSIx/. x tVIr e" 6L9FHh,f_YS! 0ig{Isy7XOvJ w:5Ť1@&P5e'ׄ}jt93E/%w4w<0 #Nj2!Cy. 
"g)OK(rgrk5m='W;d $=ǟRP)7(ܰ:jziIkx7dWS3˂y-K {RZN-vk4YLW9F3w5PHKG4SLTI|nZE {MֱXb!JabB:09u׎.49k^{)ܰ3}*@1[I\]/*0# @}卽O,d0B@UyHkB`@+:>:jMc}c߈KcrH(@"FR`dkL=}jl >rD'' ?@1vl(v&%Ki9y bE&փ_|#/7Xag W ׋J%-SRl"f&Z;voDif]ȣFlΛȂu=-)4QRɓ oHǖkI{%k^A+57$LxpJa"_T3PqB5Dm8\)Lr01?PgmWIԁTc2k דk̙SEǾM]kwzpWU7YY\9Smt>&qjL@Bh<*K9B)]-ސ:cu0iɓ|:%ɫ'ׄGY9gs9<;_b0u b!vј91nDtKZ}3hƄ g9d)sqU:u / 36\לWvG~5/:O?`R(w3M F u re}j&?dRtoCyvwi욽k`}u5zp"ɓt/=&l,ڱtRSSGƗkFpx.b3nKg|BHrՅj%=l^ha{/5jHBkoo- &ܹGhʳۨc?j:qj~bdâցZ;w&Qg)Mʔo.[ YUi^KwOc^ph1ƈ\Jˉq7kڬ^O>S>>>`MM7郞n[;\¢%nMWWW?#2T2zUw50m|1Y &Ձ{cΤ#L|kgɚm <щmcK5қoAop7o><((hĈOSM>jRg,j/(zsؾ}޽{)n*jwnQz~?c:̾u7$wN Ƙ۾Pm[Ud5ըPDY9ߋkϰ9ˎT*Nbm܍FZgqٳMڷ~2dի3{޿O.y_y1cXZZ6,88j{ڼׯYvttQ{VXakk;pYfIOtt;ʴWa-$)=3g羯9KJJ ]'))Ņ>UKkƍcǎ577 Q/at~hh>ڧ⾠Tq i{{ڵky/TVGw#۶mmjqM1Ɲg}{졠gذa/bnnnddɓ_xqSoSFQJpĉ~TTCG8}'^)^-]fAA*ujee3.PyHLdu3>pc>UM``ץRܹsFl|^z)++޽{wVug}6>>ѵk>=/lgIyTCzxxH$z7o5ks|ǎNNNW\}6 vvvR^:~~~d>}'nX]]Mn:F绻 TH JRCv+}g}}SJAiQEs~ywzpBsJjPPȑ#-ZTSS;gƳZO(bvHg=ORw{>ܽ{7kL*|Sdd(-k =,϶6" ڡYzLZK<ѣe2/zAU[L8… ~YYڪ?A57[[[ /O?g'-o{;Ç3.]bo޼9nܸޗZv_M;oF^^d :3&MEF_z%:t(UNO'L oS[RUo,G?jƍlRU[YL}Zrҫ%%%-[v)+?}<īNY8є,(BrPf^T;Թ&U38P9{^Ymذ[՘1qjyYXX0;dU)**5jɓSSS|՘{\ /yLzz_ήcԩO2ޥvh@555K,p˗/'AU"&I0ߔfUMXXe8-e.Gp#>iٮPjAAA9JGLUCʹxڵs w ?z޼yS5[츚oy1rUY5(-&SOY?ᴿsNkkm۶tP~O3ddsM`TS1iZV6"}6:ZU3ԊOظw6իT~˙<}t3Ǟ={'cƌ=/ę;vlʔ)C;v,=t&W^"&=<SF>ӦMDJ?S} /lW(O?B*PƭjjHt_I/ȝ*v~*>f2aT5(.]SGlŽJB 5mz0ȭw3` ;wn9 "kв``h TNy՝?mtq`ʬ[իNNNu)L |@I lL P5"8Z߆S?R?` 3f̚5kY7t) ŅH"Q?16pCWTMUX!rUX$\D!ڭUr`P(| ,2LWKT5]-Rlv.\םkĞ_~ ݲ]&񼼼ɓ's T:gE3yp BgN틉%6Ոt(bV|)jNW`vjiiYUUw{xѣ;H"8;hp Bٳg=zTj# j=N$J*jA) ڴ\'-T" )6j??xE҉e!ctˮ :{/USO+kڦ;w&O<|p wwP`)4|ꫯ B5B-,P ElE,OW˽s jŃ+.`.W5+VΦMt6DT@ ?5k:::~9s迪 4Iuj߿?~ &оFH"u;,P r)$&KgDk#A*@h]q>Uj%HjڸqرcCBB XXX앷m6f̘~?|5k2kZ*Nػwqz'Xw&XAżBCC}wُ>n#XO VUM8OIե,kwđijgT$*++,YB}cTPUkT;vLO hgϞ=~ 6_͍>k׮}}P-SUzquu<}tAV}\%@F&@S쿪NV^8ܑ;aj;o3ό((>#$U)vgSO hg…(ۃFhѢ_U'R0,gHՐ1w5(b F[xETq57o?~}Iz?еtqƱWp_^^ξP&LO"~]]O)W* T}WihJ^*\_UN Oz&٧.=ϙJ#?cbj lmm_ը*U|'JgzHlT쫛> f 5Ks4O:Y{pGjg]=Z&>}o_U !vY$'es 6R &Xڀ\AץP5(b]A;Tal;J|2`>OM"b< Ec=^=v܎@|WjJ4Jr4Ed76NW|UxaRI%[?hL@’ ՙApX^}ݨ>CRkkG> ;)SH$n}\A;Tu({+=( gSP̪0[,ʴ+x;R'})GS5|.HDGGߺu(XskrB,[YYCLU*#G9U%Շv E꾔ʽp#SUܙ\r} UOQ,G@BU~_f3NSL@5U$lmuN*P5R5ܸCiӦ򙜜+0kjjo9;;WTTttt?~ٲe]\\'@'N?޼yc/UU`g͚yfqY-ٔ7N'k…?CT+ew*OSīj\bv%K(X*U$UIMLs=2UG9s޼y ?tuusJP5K2My!E꾔ʽp#SU###}/;IJ!:vJu y~OUçZq~[^"=TM?HҔaYZZVVVլ}v(O-,, mmmx B"23I?yiǎL&Oh}'R] VzgϞ=L)HgeeCLUɓ %c[xF͒V|zx!E꾔ʽp#SU~V,ӧO}U)SG#+p%x~OUçZ2 3%X#̝^@((T/_㡭5ui/sfjYF1 L\ E>ۑ T!ICraOZrY|P5p/n/]-_<m/CV@hAa>>C?8P5p/J}vQ>f;e pYT9_Ԁ&OvC>3e 0/AVal ]ߦj곋O r% T ؀|P<Ŏ>p>` Fd9 (@!Wc@ƈ cVaE^M nYG0!@SV oUff!^Q)6!kLvCWg)Z%7(,YaptK>@hBApEp4J)^]&ɰ_hi|"gWmAOct7~Y]#J4 z}6֣0># +ð#m hQS+CjЫᯑQ)"ssڠn;f3UcbnmAr>czl@woH,C> ׄtG$[74!+f8 BhT[Oɒ(z`Q6Z<:w'?`^BbP6P5U|a5`E tuy%>sMjE{CSSg]Bm@SI%)6Cb0xpt 9dҐ[*rEU-j'aFbG4@`# \ AKHEJT1R!G^C&4QIπ!;᰺fB$MCh6i}9ݲ)6uF=DdJ> `LĄT`Ph*Lt{zBEI4> YUE*Ä~0[O2W6! nYǙ~`&D|h4T :73 P*P5P5@ D-KW :AWשcILBմI-] YPy>`̺xg8XJn\EV@h0!P5@o!IC`(4R!@(l}#Mc~9 5P5 DVu Wt6ݥغlX2E\IKtCs gWm_V 2LH#D;m "89 (OZ|Q܀ kRx];bzBgs8ЇH'LOծ8#XSt;嶞FԊOG^ d)A+w1c Yʕ]"o-@" 7>|Qh†ڝMw̝-|u`ēP5Z*Qo+RQfNEp4FΙ$h無`-s:q , d7ΜkٍyGe:w';CYi"LwUSDV {j>Q3d-(Va[{]뱡^E>04䖒qod\`zK;.E;¾|a@d[t_) }o&;6y U?`L J©.r^W:ӑ0n2OIG ױ >@U[h[E:jL մ^ 9,i0pJ*(dQ;ᨅx⬹LV3P*&HϛƏpOG${yOaz"䱋8V|93Ng{ =RYPțkj'ھK5ag .sJo %|5۱9'/Ak\PBpE=Ogz#[>? 
t;/8FˁAhGI97gEV P X Rm/JN8>B :*6?5iL\ L/DfLE=.-/vk]ɠ@2;$ :å)~%^xu_#gԈ$/$Z(̔ jLeNxj޽6iQ 'fI7 㯒_ 3M1nE>1w30mU"OESI YLB-qyg?؊<\ P5mzj볋]6jLzE9t qOYwn~+voܓs8DY=CQq{m_E݋$Jl햷]T̳붉'-b;Öj7cFelē)NuOV .qt6eуhp 'V3G=3?|)*'q)mGO {_9;ϧ| C MKVEɓ֖ǐOܠŠBApә~ux9Mi8@DXFVa7, ljI0[ཞ[p|jg7M/_R5}V{kbc98L?E!,q:t(55@"477q{-\…B9sg8s￿< cǾquS>hS߁iJjJ|>**:hG| w{{^n7KձKl*,nb^g܍w84&,RhIԑQ&'WtjZ%7#QO=ێ̴AUno+Q Έ Ǐ/))s)ß 3ܟPըm[_?O?=(>tƉ,>nP&H`CniPŴ Fm/p9hMY%ݑs9k/ٟ9{MKw|Щi/|#-F=Uvo +kC4gM]mn$꛽LHHɩs)trOGLT]SMцU~5pIՌ4OC <ݠMT{.[q]C E-v^h*6܍W*6Qglͦ˷N{}!~Ĝ|@r} Ag91`Igi֭_x:2q5om> :v5HHhIIICC7| bq]]7̆1,jm؛41fVJJM4$+Eq6QΦWc,K0wrT 10oxri52f -M8l&0[%"TU!ŧ~b:w5TǞ|Ҍ- _ >111//E!;*j2?PFS V  :qd W_edR胮lgsr=6H MOUpHslK8ā4q,:5CU5 ꟎hCՐ/P~}BM8B"Վ~'2#F'MO}vm3qة_}xw]<ܟ0Te37Xr|Q#zϞO{nPLMZ_+>[ɠ[% \g1,:#X)Lp/U5Ubv8pXi=]RpMpMwveدLq5MD J477~w$v#O. ^.JՅwęe'd+XLLЪ*#97^y~[7_[J5%`4旝]%)o]z>D8UM{{{mmm̄+{3uCP;of w&* p&_`ڻ۾Q`QǏнPy4#IDAT]}XRA/AܮD_zm 5l7=RqMp˙+Rm=/Ĵ74MD Y0O)$IAAAJwxǠf$|[5,VoB)txlA>: JaM}vQr SQ5$򒓓dBwE||(+g!;zZ_exb]Bg'1ڧ4S)txlA6 ]m\ Vt[3"V& NRHLgaBU/gM{` F> ';>x`俈RÖN!Di6UBJ'l! ] HT؛)Xi‘s|eDEU[ M{`F>?~XL5a P5jLE }UJ+${( nvJn Oj2W"T Or=6T  +  N׊O?FYմ74%Z 7wʪӦ@>ixQtjW5)6ѲF @_T `4*Am,G$J|`aФ 裪I,p3@ׄ>.E>`lR?i|p9{MCn)hY]#[xƹUXA2&)gZ6`;Qjhu! MURlܩo#OƬjjŧ}P`wUZ.K-]п󠫛Z3K̝볋'P5F`MK!1=_z@h*dɜfRW= T l톾쀃M!h/Y?T&L|>4U50a@%>j h(Zs'r73 'SW5 8h-j@K\I˰_|? +fYI=P5(6i4N# Y]㥐+Rl7E`1T-]0# l6e7#Y';xaN3T X_; m ,D 4 &7"TM}ƤaF TMߐS-!PMZ_j)gjC-]P@;E>MqBdnPuCqwƛj$ֳ1 R()2gAW~y8As]z~W`9 3 U3 J}wbmoYWcN8F>YdJ"jW\{CUu>#(~n90[JvxLVɍ [0f7E\‹j4IC;y>|6XӠLBA0m`zfR5lư T?)jȐ!W3%Q@f?P/P/7I_}Uڙ>}c}O?3|W={6l؋/9yd333/*8ٻwqN|WS:'""=944G+++,YBi{ꩧy睆joo_v3WX&NX^^GEE1;tQU7;1oSL<J*03I`-C|wr׍zzTM)Q.tyQ<)_.JΝ+ .\X]]4rE0Ι3 H$t͛g͚p³>O.ڵk|ҧ,{mm-K/u޽;wx{{+ƍ[ϔ^j]hΧ{ݻ?cAx*|x?ba g jtWdzk1J*Ä+J}wjv"P5Ur#E^~"[S3zhLF\>ep_VV&Roܸ췵ѿuuuÇ킹Ox'D^zU-477[[[+ք oy>qe˖Npp0e399Go69nYUy9{A |bkҙ N^!؀^^U1E9#Ɇ xgy+U:^TT1jԨɓ'փ ͛gaayȐ!Jp ҫ;vM&gΜItqɑ5511>' Z nY5a3%a3Q/P/Q3K:$I"G\>gT^^=@>B\)mѷn x*Occc_{5ڧ"hܹ?6xԊOg;` Rm=YX('+ 9CIx s^U]G5H777̨7\\tttoz瘃TTTra555K,a?U?ۏv޼y}>< 1c(LآY|)S+YY|HcsssU9v}eСcǎ%w~-L'0sCKŴU38u PEg3M9#fX,~Wigǎѫfws>`_vA[DkBXZ6h*,a^^U/J^ʺwޝ;w|||٪^[[uVGGj֭cH$w6obZNdd$U{*:` ]-Rm=Y4Eb|>F353P5RH ^%l z*ѤIG)okk>|wxرcR666׮]}[[K.1g޼ysܸq?3'NpO˺Ui.XTMMѣe2Yh\6 Vq^}N c0c |]e@P`-&c- #f{L j4 M%VuBaay,,,wC akv\]]B!>|-s{9h?4>w}?ٰaeUF\Š޽a &h d ]-EX ZLdk7#3@hqE G'FGGߺu'OV`?~|ƌP\\2eD"青^{-''cr LWfUq|Ey)I9W\a3mkkc)}$B iBLZLunŶ"div^P5څ釦t&++%KrAAAtB'`nL|󍳳3{$,,8e ]׿ugg'sƍ]\\?Nf/ryt; (|EyH777QQQ3k֬͛7_^h{YU9($IB$` &e T>"3!Xi74]I;/~Ĝk*[.KM!,@@FP(P:ձcǦL2tбcdžr?Skkk P.8"";~8{CO<ĴiD"_p[v@@5f [zE ;v0774yf}jjӧ򰃯ٸq^VUOPD[(dnU;k1YIK!1$c̝r=6H$oL*,@@Ft;Z? 
If<=9` :V|:"NX><5k-%LeVT-@Π^j1S7k &}^(^J!h A{[Z{-yo02P/Fjh&ڭ ScI-T?nMZ_h΂ NL/~- Fc4_PkT]՘CwPrM%"2W&Z{JԘ Ti-z9{M$) M]x[Q_(fV)ImK ,jnf[jd岴zSn̝LjRf1THҐYр.j<5b7{/ /׊O yήrMt9P5d9 r5%@o,`1kiWdVaY#d;\ <1Ttu۝la6|Sm=%QbdAuhR&D2wʪ)i*DVE>۳^hʶ> NgOj@06Uål[Ԧ4>+6`4\z &sN8.I<;-].ĠCB6`-6SV׈00T?i,=ԏZDj Q&ȃ,GAPdA|Q0:[]T)@(BmdPҦFW˽k l Z҈\!i[ɐsY`vo7Q՘XM!Ґ[hBGSIee*t0U7ݲ [\K}w4)duX$ VHcLdžs'` U~[.TjP]Ԅ W2-Mqg &~U*YV|a ihܐDϮڒlFJv_*F[<&8Z"cH=jW3N&ڍv_:U_t:j'e^üAed-ٞjWs#i(DFp.`Ep)e'`h_z Ku{ 3kerwP$Uă("+W*>t)8≋HՐ\FhJ*XkYH s@#k&(SA23< GKJJMb;'~'$iș)FP5P8@%u)6TL(YwU>>#shؿ*2" c!=C ^P5BY,fV౫-8;҅J/("(f8K;~Ji`V$nja^`e0B'R'Vɟ|G_{| YL_مN'lRH ,j!)`-)V oV "\5m t!MZTRY]d@4!ȰGG9Y"[ј_F5yWvouQ41N/J$Q"v^pX].TEKsRJjdV6<[nx"R kK]\b.)fjqJf)Lڧ#x@՘"]-rK/zlYvm}E9-&?jK[ծ8@G$fr,pKu7țYB(͗,x(n٪)XY@&هy㚷ܿ2Lؘ_[ST)DE(΋6¬'YKAטvy8Y\777a&(݉1T $ݲ[$igogCfX2Ĥ$J-Eep/$ͥCUϰg-NaP%OZM @AJ0\ pQy+)(b>gIjVɍNP+%IS% ݩH=:9~ⳅA1'suT c/t:J*)D+ym};t ʵ2L04<){L[.~?LDv#-\ҪO A0q0s.M]ʘ1ix @[(qos>`CyB1cC e&~j1 3Wʿɹ&_>ϯJ-q:t(55@"477;hIqT&BKBs)|Cz5A>>k|3ouJ*M|IG_zbwگ_|MըJ߄CgDEE ǏֶWNJ\rYJt ^ n R-6U@uԩǼa.PzauW Ś(qCnѸ=≋IOaÅN;7b%0zi&魪QH{+{X۔4I1id`Ys'ӹ/oO@}4 )DIՐ!C:YN'g IS:*(=G:ߛYּß츚>UM~E>cFFF:tH؇nU t2I IBbnf`* 拒}G)8Fn{-j'QrYj4볋V5O>ijSj0 ૨'/Mᣯ{l2+,cYʰ_IB )F`PtKBI|Sifg\)P䳝ޫ1itZ䆾 %k4jN|gU#BnU#tf +oě;Hٞr[hŒ4eI1LU&,ɼIqXZQ꩚̟?/F!*{G#U [\PUL]T;;+0Q@' NiSWd;0]:ãtI\ PttHqqgͲϙUPSoU3th[0c@;e73 $Q [l?ԏ}ɓh颠y 3Hx\pyt JT5 ffƎZh?|~l)TI#{̼M]"g0{`.=h=O9qfi$Pdž :zUON^#|#f9}/Gƌ }ѩAl=NުٟT t 0,3jHېa^ad է3o{.Đ@Y@bvDKcm;&@(#GN>#~\@`訷>3o=9IE9P5P5$MϨo 3?iohb>¸@`&2g3,P5P5@jwM[̰ԦRX)4{{T_0T j0T*2P5PBP56Wq5@jjUT T T jT P5@UUUToDQ=p@`4RX_wiU@`JnPX%ri? 8BC[g64UM+@`,(v JϧC U ]G^]dj4A5;T Mg^w##ߌu}T j0l7}CcA騚w>'@`t6MYbq ={o#iBaddrrr$ LP5 +bw81kE|4>4cšCH|yyy@`TD& 8mYO'Rt>OHX{0 6%%% 0@`477~w$v#O. ^ _3NJgUUU0@`LY'<îfk Woj=hI?~xII P5* %އG# |$|[5,VVҰ}P56 UUUyyyG4PWBbwhvQ濷/c=bҐ}S_kwT F%lRSSIfpσLU1?j}?5@k̗Ͽ,qT5_@U=bpϟ0 @k?wy&j5fqq1>|AUUѣG^XRTCCCyyykkӧOÑ{555577iT =x tK,{<zlT eeex ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/images/sequence_architecture_cdmc_sync.png0000664000175000017500000013331300000000000027275 0ustar00zuulzuul00000000000000PNG  IHDR񉽽5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gU[zTXtplantumlxTMo1[06*-DK{*A| $Mvo~~۹W*bv)CFh-c*z=b#+K-3ܿVA -J>p'ICB*=̤ KhAW ,ϖsC0v i@Q(44[r{dA *RX?$.<C_5OB 8R( ćj *4%+TRkSD+Fsd;5ZIBf`u0wEӍJf5M-T4=Ku7$AdÌZDLiBoёL m½%)_۞ j<lQL kI{V'xWχae177s4+}}HD2]$˘h>"l߾7Po@rJOM=iXx !w!k Q >|-9ͺ>>9jWr\k >bY"I .~[ٚ IDATx\\l8EJi);ΝKZ)MQSLZ~)z)7FJ$M4RFh41dT.&b:iꟐMAEJ =?g8<Μ9fs39yaM$4 Hh301~]''<*SmHhaua-Ie/;o~)/ݾEe #//Uⷾ}rnޓW$O&6|b I\'׮0ɜ2E755=쳝F@B C{v}A+l:ݷ) MFs\}}}W$ٚ^zJB{衇}ٮ.+ZhUw (s+}=;wlhhhmmd {5-5i/wdۺ*珼~x玿w)o_͏$ݻ_9 2%6oaE[]v%>x{gϦؽ{7w @B SgZ}E*p5^{6FuMw@B ΣO4~vgo|q-w$x_@B .6í.<8Ib8漘@GČɈNhyoDG/]4˓_hBLy5׿ ".18"d`bG7]D<3'#7u?|x%W{{}Po75DM/$ i^|#{9)(N38"e? S/V?خųg>M2}uё{% id p;N}@q20@G,HLhg?n+w[-9sr5ռǏ,q=vX&F̠O~ξ>>b8B]fϻY<6ޒBݶ?hZ<{W~1@?<}'_Q!-Nаn[[[?. d`1.">?fOFVBHMW.[okoxu/扫_S=k2CD֑.xn{ݻhooc1P /fpt3E,Jhߩ7s7iLF$N~c L鈺 .xnC.}@q20@w'#(߽%Wr;>$z5CHits{<[{;wlhhhmmd1P /fpt3E,HIhڽe-hxva'(N t$hMhWԽOmH۔w^Vm-={ſufPpg|05%%_~FQBM%4[3/>v7?W mo!j ㎊OH'&&ȡs;Hhk. eZ'g/2()+YJ#tk `5& -업 ?OOf}i[ nX&9j4> _ W& Oo羡gƉK iϔGS/4$͸>cۿ|=|q{}}OˡfY_+/_w݋c:fy?-*Z9ߨ?_?7acP$4Zd~=(++5adfߗf"0P={6.XkTX/Xb9 [*3u 1qz֎vhWCqR{sr>h)O^a6_pضo_N)KhZdm߾{nTj];/GTgSZg 5vM]~Cٕ/n&NhjMg,)j䂂k^ޑAz?~{jlL huvپ! 
tkmE|_m;=w\ЋsÆoUTڸztoSheBV酌+L9ٴژ3hirifbƴ>zd>: /mgP3o?ffg QGyj%"_y15EFa1~=m@@6dsƍ+uTθUR̘G|뭬,,_^#ܥ/eC'ߗ|Gi ]2#Boh'σ[$4ڏB1` #k[[jyϳkg` ]r|-n&M~};iJD!fj;+oz?{7qz6~O,[?o_'Q>͠mgP3o?ff:Km<&%6˞ 25Ưg 1ȦɁlθ1徎jP3z Vȁ㛯*mzh)軂wvDtٖ־[ׄZ25lJ%4훥hD㟁i\m-x쓞3A K5>kv&4_mshee0I$(,̗vӮw֮G}^d6{mf#= *k# Ϸ>]EWJϨ)oOfoyiff淗JAF vt_qCrL͠|5a`UvMiUZxȗ+3^ɜQn}#꥗Ւ((Hhm.\x>!]BԮTw$ 1|5AzmADx29 XU^b6oS{6 2o9}= 5o9Ö|AlUZؾTT喲 %#?ѿVU䷗%_m@+Z؎үJBr M3dY'w N=;[:xE&m)PIarOhn'e|Z ͠M“֘wѿH#*iiןjM5yUZ3\YW#Wg#xR+?1xo>rřMSR  ' dӾ*ӭ1/U:*ت$}=<.rHךIu{(O~_ݶHSw ŗ0_G.V{BӛqR!-( u,{lşhmj}l =CK+;Ϟ}RtR[gaa jA9,gfx䑻22> iS4]D+ٴA!ɧQYY_.V :`A6߽kM{&~ZiW+cѿC[ V[#%K>/#I7tujҞk'ۍx;d?Slkz*0MLƜVkܘuTUIh_#ȁKޣi ]!2#{jYA? _ "-i 9%#%%_֗(o2rr>m2}eNY"k.9Le"fp5ȶ5k~im;?MxKt^r̫]}G"I$1w ~^;Ub$7={6؞dUڦ|5B ],dAYnUضӪ_o?0|b3LSR>l`\s^|6Uƕ9cP֒l;^xU]S[Οexl𭤤-z@+}W~#bzhZ? -N`usy5Kp, C [ $400y~2h Md:#yXH]H^^3; X $400yS .99Y6G9ٳ133b`F'BESO0@B#10yS m…{]`A"1)0P $4wq^~ŋvIS;vHOOX,_~֭[##l6ϟ?رcvyann3gb޽[`ZKKK9uҥKYyY=5fL&y[RR*((P\Jl۶-##Cf賜~tt ' \&j$ȸ6QBl}$MNN_>55Uґpɒ% Me|ddoX6OVV ooo/++z6lO].lW)//޽{"+" (N ϻ9Khmrrr_vyyyGlkk'm 򰿿_{hZ=Zwwq6L{Ey|/^r{/<… .NdIh|=``. tYF{X^^~!5n6%1_C_{}j6o޻woooߕ+ %L^g|/1.Nd` VZZvVqq$4ѱe˖M6 ?7" Nd` o&&&jFLLL.-2c(ܹs n6^xA{m) r"_9Φ ~'wa m>N}@q20@w'}&+VM);ttt[nw zB+,,89塇?_׶ԍ,X=ݻk%==]%1x@q20@ϟwOh .P6QhM'd2ɿ{oHSO=xbٖj-))!VnH ΖG}}R+^|/ɟ߰n߽l၇#! ni?92\.w K\Z^ڿ}bG7"A?IB v MMM{v"\ͥG8-fpt."$<~I imisiyiA(N`nݠ#bfd$k.X.5@%`B9(?-Z3u5א>@%@@Č71Sz$4-P $4<@Bo|rwvv S][HhAy67 Ǐ688Hu|lJ9(Т1޽IBZ{{{__*! ̞:[+]>\. @B-%\?5WvIuHhp'd P1ㄦ-W\t:fs ^ҏcyh2>Xw94? m@^}n6dz}$4@<R M@SSKׯ_ $Pz)Ib^zV9E"vm2~˸ c o^tg>~+gHht .Lhd2:4~e)sJill֪<ɸ:&oGe9۷< /P]Hh'4OC]}՗^z$"D%Sy@V=!LhpWLh2.JƇY϶nݪ_mbٴe^ϡ8+G 娨<a|+ Ay \JKK>S\Ɵ}YT$4Zf*o[ovy\6shsõkʿuuuTfr/Lh?J6(Mh)/677_q&noڴ)_~WeޜA:^$4-P s].W+ -_WBô哄V__/?-[~a LZ___WWW' UUUЂ> i{]ZZZ.gj@b5jHh$4 - 'ޢAyFBb&흷AyFBbHhAygBkj0Sz$4-P $4<#|lJ 6:z}ߐQ\\|חFoJ%|lJ9(uݻw?Ͷ9ܺdWW*!`655I؉v֓6룢$4uvv?~\0}nz҆ҒT>.'&'m(-9<005_-h"_~ey|Cf/ @_ \(Fjƍ կO>-xMh"''g۶m]wgSסI_k^_ :8@*!ZCCCVVlPTzzd &Jh,*τ&u;^F={1D_>@@B! ~KsB#/E T swHH}髵. E$4̙ަ#LWIH{,o,+6>p4.%RHhWG[9<w\ ƽ>׿94hJ y=RC>x͡_ō9;! =UXq:u?iFC-zP $4pSK4! =UX=j?tE T m6PC$4@L$4Ay-z|%p}<@@B㸢Ay-zP $4<@@B㸢Ay-zP 1и3r|xx[ou8f911رcӼT̖$@@Uߌ1mVŋl4jJoooIIjE$ hsl6@Bdɒ+V\.Ϝ斲ϟf)//f[|Ȉץ7|;ζZv}ժU2o֒644+$H+W\d׬r%ȩ1L&b<=Mh#5l6{ZvybbB_YZ\@@BC|ɒ%MMMccc2>44T]]JHH8w6grrrKK ,[WBs[ E $4ZcJKKVdlM7olXt:322d6Q__+-p={1! >J q\{N]{ȋJ ;isNCÒ׿Z[J 3~||O^7Sj ~HS'ӚRԯ~J $4þ|A =b> $4$4b"q}'m@1_ m9HhJ q\ y8of J 6G$M8Ih0zP $4+*ff–Hh0zP $N$4 +/ iNgvvbIOO߱czJ`oooIIj5###)))<2%55Uxum`M6yիW''' eeeiJDkBpPTUUe2|599911QSS#I&r-<2j*_3{MhsOymu ɂ[ p8.IUbb>}Z֣M5Ew qGwwwl6v6hRRHh5uttJt:~ϷZg&IM_tiSSȿ+V03S1͞k9b2i =hsDnBJ 6 O@Zp}'m@1_ m  Z:=<@@B@B#DuB zB" y9Hh$4Ч@Wwm'ĺ:B:;;bر_kmիW?']VV622D6dL&+///SSSkkkUXV\PP000u^g}zP $4戚f 3CCC$RRRԪ$UUUyyݺuEEE扉 l˗/[V,.."# ϟr&''eU28[5J 6Gt$n_Bs8uuu^Uv{OOMJJfuʋWGnlO@*ём TBWff5FXks.+??jlO@*!w҄f|MB Wmmm6k^^3HhΡɪ'&&d\ }4.҄C(: Rd2mzyyuВĄsik޸qcaaaoo>}cz:)EEEڜ e˖iݶk6HhZ{9vttL&qjfsvv+͛7[,}.."~Ċ Yd-eekݶk6Ih  B;is4JnAB5 T 6 m}spЧ ֤AB#> =hsHhO@Zq}'m iWMhl?cvؑ!sss_~e5XeeejxyybIMMizdyldd-9ly6==]Q n:w AKꫯVUWW O)..jzMM<Ԧklݺu2DEEEUU[BKIIQ/f``@{ vKRC٬m6[ww rj<--G0ݮMMJJrKh}HhHh"CZr3_d2QZZ*-33t -6Gl'4ͦ?W9/ǝBdAЧw ߷C]o622RTTvͿq^?}tyy e٤$d zs\Q ƪ,SV\9>>HEElv/G i&)''m2%++KW1l3?>}zhM*ԃZ%ky> =Hh& ꁄ9exSů0}zB;isMGoe~(uNѧ|SNtO|$4"p|5h+G1 0εqECW?}zhM* i@%@Ч mq}'m@1_ m  1и6 O@s U/fC~Ч@BmMhARyyyGeǁ> =HhWT0HhNhAMb@$4+*$Hhi@B ! 
Mq:%==}ǎ)*++Ȉ<J  RRRJܞT]]]TT4)EXV\PP000el-==}ǎڂWNNNEFFFig+3l6ɤ677/^X[jѢE366VYY0EF䡚.Jނ<556}㾶AnE$%%%&&2[ѷ }zP њиG*mOh|p.krrrbbBD5=%%EmE2[UUnݺ!B.)bxxxf f]mn)33^y_2_[uugxJqq<Ԃ<Ԧkl-5kHf zm[> =hsDVBӓP/{&9-11Q;~ vGJ>Qfo8мnmf_ە$-uHaavRN`ٴ d$55Ui}m˳A|mE^n-c}Ч@BmJhƍsh.KƭV oڏ:::$HZt:^6lΡڐAԶu3%8p@qנ+_vy㾶V V;O@*:412.zfٴ ՅRũ_Zo+ 6mr.ן+ v ޸my=u+C3 }zP ѝи6G'4awuWnnz]l2}P@̣&nܸWO>]niuVN<)r KH oHչsk'Tq"==]ɑyNR$y=jC{6o,S3nw mll2eʕ~YQQ!A"۽n-?ժ$irl[$4@B@B   w iAj=m@T 9"'quЧ@BmHIh3uO@N$4 +o92iYKFmp8fsnnnGGt:-Kzz;Ԝ5OoooIIje )jjj괉WNNNmz$4$ MNNnڴi6)m)e.KU^^n5kH2nݺ"(+жxBSfw8uuu^$&&&3--mAӣGGG ^ @'4;::JKK%/eff:N3\|ժ~h25KJxOh\I#>fy}n777OLLȸwΡiyO@*!H \) HjbBB¹s紙evزeS]&3C۸qcaaaoo>}Z }zP $4+Q. aw !++l6gggk?2ܼyb&pr^Zb۽%zrrrd/i@%8hsDeB8z)+2CE3> =hsAB0owoǯdʹ!J9% =}\ﯾ.k~}zP 1DxB[_O I_gS>x Hhޏgj8w\{b _\'gS5~U%}6HhšдyZ]r! ?S- MM0֩{]^N}M!J=RisDWBӜ?A{hO@*sД ou5=OK> =hsqBJ 6 O@Zp}'m@1_ m  1и6 O@m+W_q#''q'&~3+k$UJ# lٲ|@BCBB[ =Iy3vڹyciW^{- TB$4Ih$4@B+].W{{t}}}ô =Lh@`'4򰩩pL&0niH_!}-HJB?<~ Hh$>aooR$MhB -Bv("0eee޽I>Z $IhLh~_9Hh$4']q>\.Z $}n6d\?obȳ6l]<{饗g-S,SJKKe+sPv^/ /~wdd$!!!33SGƭV\^^.]wvNEɿE8p@}hnnѣG 69/+?s精ޠԕ+W&&&z322ORD=H ny;^gl-j.ϝNɑCl k3~qs=W># MJZΝ;|@t'4Ü$wm&~kI)gp 0D!KVC/򰹹СC2rM7gS_ݬXBя~CH^^L\j3<#2EOfv[Je-V+**d\7`Q3_ TYt精ޠg_ 9K 󜞔$񠣣`n +v0^N.FڵKf6lsj|ݡ:Hh$4q^ m1Lhw2˸LZë:uJ~S ]/^̮$|F@#"w縢Ih$4> J 6'>G@B=HhWT0 W Ч$4zP и6'vB5a(q[4F*HhIG]6?Q 溣$  $4 ! j悂5}||bj)tȴi&AfƴV^,FFFl6d'.Y2ah/&)))11QLy 9 -J|q\j$h xxhtGQ׭[WTT444$۪_|>j_[<4~ Ț5k$mʆ$i[}@BG {$(MhzxZZZOOn M?fSv]>::/iJeqm=ƯA"A@%@Т&\|:oE|/WB?4tnK2> J q\$LhvybbB_fyM ϡ944F*qEb<%''Ȉde˖]o62tͣmƍ2~i7_ MLm:4v#COHh4q}'mNB҄t:322L&ᨯ߳l6KsnKRle) i鲹yI mll2eʕz|z$%$$p/G4F*!N@B҄(倄 m, { "" mq}'mNB#EHh@,VwG̶9 6W%=ҧ$4zP $4$4D24F*ڜFB#@IШi$4әmXw!SdhKZڶm0͹<555III?kf.l6tqoRWVV&LywqmDOM-))ZTAAۜn+A@%pl"$Ȉ䙪*ikk̦ͰrZ[ʆ&''7mڴp{[\f!>>>.QM 9^g˗/VWW O)..~w{*e j\W\.YdbbB^Iyyץ:@B=~ $yBs8uuuyyy{3g Tn2It1j<--۽1Lfٴee$55H|˲TbbqBB:::JKK233NSOeeeHyy^s׫$uSNkզ\|ժ~Ө~Hu$4kHhmBӴl6ann'''ssh"bOOw;ed_Wv{ssĄ˿~ct|- Mh*_IBKNN֦766)~CM"uh$4xxxxddH%-Rԓׯ]Sl2m愄sym[@BmsZ$4e6_9&CG -7$1666VUUerJu[ { p87o{㴶4@*#fۜ! ͗Fi 2Hh a޽{7@B=HhfX /@> $4=HhЧFB ;isZ&44@*!@B҄7”wC$4$4 Z|%4oL"#PaӦM\$4$9 -ZuuuQQby 5556@B=>bIhl6[wwTmmFk> $4 n+”ܮ734@ Bl6\@ T $4YBQכ]62@@B\IТ=UUUY\r||\699YWTTh4@8 $yB3L! mڙ3g HhHh -"jݺu+$4$4AHB ̈́9 9mNB sBہ>G@B=bHh !8 mj?: њT0 z > J 6'@zw$4s$4 +#?Ih$4 "$4@B@B#HhHh ! [IhHh$4и6'EiBc_9ym# QT m1$MhzqemcyJ=L> $4=Hh$p''=򽦼w'^G?žUЧ$4zP $4HhaLhwGˮݟ8 Hh ֤Ihԃs;Hh -6'ExBkؾYx2A}y}@Bk EzB{#/>_;?{SㇿDH@B\|=?Y6 $4$4#/- HhHh PBS. Y0z<- FB ;isZT'4> J=Ris s$vڼ7Nўs$4zP $4$4H2,1lC=$4zP $4$4H'6}^$4zP $4$4Ht$40s"s%JDŽ9 9BB؄۷KOrĉ^=Oh  ߯_aÆ-[6QI!J 6'ESB03<<+-_\R֭+**Rی嚜钎$ոe!RRR&yj6l +WU+'kJZ𽷿fI2}||\^AB3-Z k MO6V+=LBf6}v@I5>::*sK^Z ;~ļ{^|טrlT{iiiFfisHjj6:eFuѧBд^k*,//{=LBf|G˕oZݦ^GGGiiLө&>SYYY~ps^)9k:6O͗ȿ?y/wX,LOO۹ĵtV,{ž}?~d~5~=$4zP $9 -J>'4ݮ1s#ӛ'&&d\|-U[[Unn'''ڴΡϕq>5>ٳQ2o~ 'NP?ўe6>)9OZ`Z `ƯF*Т>nݺUE'O.[k ڸqcaaaoo>}Z,99YxLՖJHH8wWdFY|g:4y%~Cgf###EEEnסL:4_ںujz]- Gkr4ب! $OhdZ`<1()+==]fnt:322d$zm͛7[,J.'.v ElJR~{(K9i1jS[KWikiXNNcxHij%T.IVc 5ISRH{?2v]vfgG0;3~ g6EG ocph^EEE1m =' @t&4?.yL[kw,;uiw IJw>w<$4$4 -IMy 0N\[ӕd:r?/NLnF#$4$4FB V]~8y$ו!Jt7:卿+f/O$ee}[2o[[o5Y,+TxHhHh9  f:thRS-iQ͝{_.lߙ~ȑXJJ,qSv֬N!99%!6o'-~%4 T C$482z^ȥs.9nv?e+WKet~Z˲<&ƒ?aNMkCB@B}EHh}Zx4e5_RJhe>@%@B11111)ӓ+3/8VzMh>xǕW^LB#T $41QiH]vޯ-yyO~ۤu-..63 nTBx'4Ih$4q$4Yۿu_uw4ב\I TB&4FBbï5 $4@B# =湟4# TBV @(tY!{lht:vر y~w{エi?Bd`ᩭa@X&4'^Fτv-?i+&N# D`$C L2ՀJ !ڼKFٌ9v?BidHI>rT Q2@vJJJʪi+l 2ɐ$ OgϞ#j@%@#zП֬YS[[{a iv\tzj-VZߨ^xeij-[455;v&}zP 6gVÇ;;;i"$4 "hgDٱc$q;HhHh"˟^: N}а 2! z'702R2YS[g^qۇ> =Jh|]6GP ڝrkMb =hsxב]xAgʴ> iOCq@BexCXz[.⬫xК1cnjID}ׂ@>TB'4K][Nh2>]#IH@B?on&1NOvY/n1! Hh>жe{yozsed֚r7! 
Hh@l̯CfctŔ5 .NH8OwpO|~WT .:@BexCبu7~YqkܟtL23>-[6lOϏo1p?V=9'}4/u}G֯\(Ò -99N]a`xx6m3߹m۶BJ 6^,ٞˇ=/LyҮޑM6=#;vسgxu@$4Zyy>C~C.2~oU%zm۶IHۼysmmmss󫯾K4 њͣCO-w.4xڙ3?vw}[io#oq:[of暚[ZZZHhOä#FW~ }3sӺx_LܹӮώcb]qJׄ`uuuTS+~_6k:Gձ_Y^3$48te֎޼mǿVsRxƝBgϞUBs'2)7o$m޼Y֭[>#=}'2)7oٱ-[۷x`rq]-;喧2ҹc7J7rwЧ@Bm=iNA")dU3ׅ.;J 6ѐnQj/e;A, i6.:=Hht ogϞm۶mٲe8ݹ|?d#)٠lx4! >v憆 ȦdY^}U Tk0وlJ6(%  !m``@TWWd&Me3$4 hEGHht o<=Ih|]6P@Bm-zP $4W9(@*(ZD6.:=Hht o<|=Ih)w{#  3+VHh@B͛Ws饗Hh@B$T#IBb\tFW$4%nt?~k``z@%qB 9(:-\7a T 9(Ц'ر^BZKKKWW0lJ 6$kfӦMNg[[0lJ 6&4ӕudɤ3e~Q͛kkk?Na $4Z7Pf0τrBZۣGN-P $ $4+99l6'%%ɼjUUUg,vժU^s k8p@7l ^d~߾}2OM&AܹS@WVVR|FFBINh33InMW#n?uTWW׵^+?z i?WWye5W+**d^~kp_)iJ\O9i$]wuW_} /P QȏƽJВd̿2/KW{ו;ui׮]Jn|uy祤X,t'%9 z!W6x!rڋ4xMru6oql(ABL~Buf_y֝+۷oux w֭ey`+y $&?q`[M|ͷ'pM\uUgeeɏsYs_QBy睷sN Mg/FHB[P._nhh 1˾pE-hsP Ihwyq2mwmص^{ԩzKbx}y.rXQQs_^򩧞RΤ[HBً4x~|rzȽIhAycP;isdux饗*'$)g</ܽ{y7>%: Mg/0lJVBkiiq:! A@-^kB׺ ͚5[nނ瑛J#a Y $!{9(YQQ&lժUqk" ׿kn۶z 5Ьb5`G?~WDX $4DHh$4 Hh$4Q.LiO{ei$4Z7P`  ["*us=n \spuӻI$4a T 9(t644رcSDXS,CSZUڶ@%@Q---*6]i?iIiOiUiۮ.j `@BmF?~'j߲/\1 -))*mGa $4Z7P4]]]$ZZZNgs{N1HKJ{JJ Pc|0Nh)&OD[[qLiIiOiUi۳gRc$4 Hh@Bцj@y-zP Qmh +(ZHhhsPE T 9(Pl\tzj@y-zP QHh" tt=3| k< Hh9  2Khceej5Lv8%11rQZZ w\^Ч@Vwmxb@̔򁁁 =Hh& c \YGGɽ>wЧE9.:=Hht o<Ih%%%yyy1 .,''G o'Np8$4Ч@$Uis1ІnFٜP^^.3˕b2222jkkIhHh"CM D *111&&p \>q]-(@*!j_W@@P $46)%=R@%@6}]'@*FI @# 2PccczzbIIIٸq,Y`EwwwRR咜~zf6333[[[uJJJ*++PueY.XV?udɒ12R;eaggg^^^LL<*;;cMwЧ @9sfSSH)**KfSW(,,,//WrK~~~hEEEVև-[&qHKTSCוeŋ%)?1RyK-(iiiNS222"Gp8>J߽  @plnsέ'O ܢeb6vJceY.G]jDwߋdK;Wkk… g͚ب,|gfϞ-3^s׫ Vcǎ1wvvKNcbb4*_4~0>$4SdVU133sʕ)))EP${P=b?W2 /ZrrݻGFFd^ ; #$4Z7Pr8J.di&KM"uhF;00rv`7o=Lwr5u-RW545uRg6E}}EG ocܐq/G# mhh2PýQ?55d2l*uիe^AT 9 =hs7y\of6i@%@#X j+0e 2Lh%%%f.n{\uhOA*!,p~/Ǣ"˘auY^PP.}N\[$4Q&0L& H={_& 'O2Aնe0v˷;CB@Bbbb~a@Pu?%5\䧮UqJЂ6Piɴ|<*Sj=¨>hsXLLLL!5՜s2ӱ^|AV 9(P?=M/#F1]\wo@B5`PEig5^A6;g|CWOˀ$4Z PRo|V9osҌ@Z<C5_Ҍ@Vwa˷sMEB@B?#$4Z7PE TB$4@(ZJ 6=+@@Bm-"$4Z7PE6cǦe B (@Bl:466[,7ʒ ֪+tww'%%\. ]ׯlf933U]$>>>..RMh^W岎j5LВ%KbȌ;HJKK1$jA(Z4ș&-**KfSW(,,,//WBĞъ-[,;;[KTSוeŋT\\lʏ>q*e +d^522RPP< +6QfUVVvww/;wnuu̜>} )fׄuj-Pˑ<~ Hh6mhL!&4#ϋ@BCh%4t:ϟ|Ad2y]_k5#PfQ1w  д5%''޽{ddD_䣵N:Z{$Qerq@BCD%4z-ݮ&eEccc{zzԇk!buCS:9sS&yTC|/ǂ,Y&fUUUW^mX|!CCCEEE1r^#ښnڵr@BexPB76io3< 0@E  J(Z$40@Ee FWh0DTB{rEƂJh$4@B K F#@B#0$n%ׯlf933U]4!!b\.{)))ZZQ1==]6qFGwYAvdZM&@BexP;y NGGG+**>+v,)(((**e˖egg: &3g*۫ncej/֊6 {pR%fe>99CvO޾f9*jx#`, 6 M/8W~c>Zkk… %t͚5Qke{r5I(ZјВ{{{@ĪQerWZ^Ck$4I(Z(.;D` -66gB+++'N8PC(:9s% rٔ$%$$x=*Hht o<|CD%իW[,ETbRJJdȨ,S 3IDATb W^ښ)lk׮U˦fϞm6o9zIh3BHh @B$4Hh L.; $4Z7PE Z{rp >hsPhHh 6P9M{!hM*<@  Hh9(a\vX,jˋ1ٽJQ6>2UUUl6Y'--߸qcjjyQe%KĎQEiiiBB*??rHht o<|Cx'܁12RNstttddD֑5hЮYM@\rZqqnWD~?+V낂"Zf:::vFT\\㾚GBSOp:?fejG"AN僃$kHh y3Ns111wM&V3 =Mg}wZG atP޽{ddD_Z jshZQ $4ieuh1v]7 ʚ-Z.QZ ML9aYYYNNNgg̟8qBJ9j@y-IhfYSSSM&fR^b\-644TTTdSXX(JHKIIgddFUB@E g啕[+!1i@ a4+sK\ct8FBcd (4CТ=}|ZbSj$4Z7PE 9B11ETsELǮf@BC(&4ΡE~[cZo>ݟ6y,@B g5ݝb+iz#׳ Hh{m[( D8.; -ڨ-OkBAy-{ݸ,Tt:Ϗߦj  M;v,##cppܗ Ck}ܽ{Ȉ˿iP !?duaYYYNNNgg̟8qBKkr͘kniW 6P'8ɫeF'X@B m@8;sl'SsW2ݴ!c 6P:8~鮘}k5^+f^4 c 6P.,%U~tAG.%ˈg$4FV@bq!|&4K?(y]-(M_2OgM_.Cc, 0 M}Ӎ_N|Ҟ4Hh,z+$49ڷ=]o@B4\vlߙ㕵NӒ <4'40&6mhc 9(PHh0Hh9(@Bc 9(Pl\vHht o<| QHh ! $4@BC$3f he0.<@P 7111F5cA$4@(ZNhҥK322Hh$4FV@愦X,^5ˍ&&&{Mhyyy111f9;;Wm6,lmm$њAy@BZtMh%%%cdkBKKKs:###$:u#򫊊,:. 
(Z?.;įC3g0&77W~Txb%߿?==]=rʀOh*KfY+d5G%''wtt(󃃃~=j+2#AN}>`Ν[]]-3'OlÔ @\֣&߇/Wgyf2p8*++ >hsPh %4O"VrrroooZ)8sh\reJJ(5c 9(PHhALheee9992 KJJrss\.nwxdM(I(Z$4%L >44TTTdSXX^`6/:FkR<!-//WFWh0Ц)WWp;p}@B -Y,v!^@B$4 !дn2՟g𹝄$4j@y-Z "f,nAy@B#Q$4FV@VRRWYY|tccczzbIIIٸq+~iiiBBrԍȎVd8a!'&&ˋ1ٽ^u5Y`PE lڲe$K:f0͜9Sَ"+Xn˖GFF W[x!<א3.\PB׬YVv~Q'VitΟ??&&FSZ1HhMB2rƇ[Vy)99`: Mlj###2/jH@B !<rD)ס͙3G"!MVW;%IBKHHP[.++'Nh]Ă٬d-{9.RSSèR{Qk5b@BAy-t,nAy@BABcdAy- HhAy@B vBysX@Bm-]-(@B#1DMB GYHhF*f6Ҟ7ʏGUVZdIb$&&iQZZ w\$4A Nh_}OOV\rZqqn+?*KJJGuVX!@BQpBSOpI*l6+V] r|RRRGG\ `x"!<@=Mg}w&X +<@($4~9^/Ih$4<@-?^orvuh1\],''SO8p8Hh$4<@RoIhCCCEEE1r)((0 rb2222jkkIh$4<@RQ  jisPE 9HhS@B  $4HhS3M HHht9(0Y mϡ "$Qʴ9(@BC7> 9(PHhtG RAy-h|hsPhٸ$4j@y-X5 O ! $rL=/ 1m ǭW ݍN$4Ie7;ul& FWK(ZSy%5u]L㱁6@%?]UAm͔fG?`3#hsPE N@$4<@Rosh_9)>hsPE]ƯC;_/ Eu\NHc, <rL[BS(r)# "$:e{h Ԁ8wxeӴ$Hh$4LsBHhAB @ j_WAy-"~, 6  6PHh`, 6AB @тa j $4D< Hhք6cd(..@BL\v MY2::zѥK&&&vttЀ <%4UYYP懆,Y;Ff?@Ϙ~zf6333[[[Շ&$$X,| X +<@@\\2_\\lʏjB?::ZQQVX!򑑑"^hsPE &f2cZەyILLTz>MB~rr x^hsPE ߯&+ߪIcf2x! HhAy-'{Wj<ΡyMhɽ4>cA$4Z@т@ %_}ҥ$77w``rvм&N?qℚmcӧn[w'4&=),KffD2`CCCEEE1 M i)))&)##"I: W?9b1 z59 0Кh+ntJ<;ޗ}ƳGϿLlm3)iwA 09rN[$INS-!/@BQABizL#O9lx;Q~]ƝB@E ڪU6lpڊߐL6YYǣ/IdX&4@(Z_[[tJW>˿G2ֻSǮhTV,?ie{_ڳLh$4SIZ$4FB$4  2(Z&u@@W 9(P@%x_<=hsPhٸ 2(Z! &D<ZPw{_f?x(N&h.fIh4]D7 m(ctMϽkDq21Q4oT'7V?pzo5TNo<_D1 LL#HhS_3Mj:^#&bAxGLW#7͘1cw$4&NgKKǻx(N&&abj`BLHHC0 OvEECCCss-kDq21Q4PkZVVVuu9sB| aOݱcG}}%ZZZx(N&&abjHB;z +jƍ)))峟?lf9--߶m#?fffZ}.qy.)n.aRǏ5b8(fwD5 MMFFdde^̝;Sܿ{P ?vww?ČOhꦆ-:GQ7MVV (N&bAxGLiBkll\lxꩧy٬55i&?jWVnqɄo*z6$4:&&bAxGLsB[pU[ӒDkk5k+**+++g?# qNpi&BSƟ 'kF3}MAqEKukטU̡*hOD~} -o122"Kdϛ7O[Hhk7XR( rEݿ8 MG/֭? 6ÛKhOl>jM7]{tsT%ڂgϾtiAj'bqF/yagJ\yO>rRS62oޗ/?߃W9Z4T?LVcFvBkLHO~ߓل& !ޟy)wMn;B*IHk3x_B=)w immUny)dZNN1[lIKKS'%% Z={rc9s樿xԎ;璒"Kwy*o\$O?_CmYw|Wp6 keԩ4rv$RynmC%VLs|j:}N{yݵ.[TG#<iVV3ZO_=}ny 7Źj-Ӥ*ud>>|قN=Qysw#MA5}W#dzGL:!H yδO+O?]5wn,jbٲ_=CFo7)'vnٳ?%l!??c;ˤ֕ ك 7t Mk ǩy M:m2 Mշ/RHv#6 `MdWMgLjO ʿAV#@Fp$9Rt6yFΖ=^`gzgnj>Q|&4Ac B:/PFSFv]odwiȵޕj~2cl0ԯ.wD>.Yx+ɫ}P^8zDsxľ+|2ߠGD>'E{BL~Y "Z?glbH_^佺w:uysw|eϳg ^ٯrд&M!A...ȫ#cXF7&j:cA]FЯ>7sdž +9OijFIh:[xQjG'*D fye B:/PFSFvO6;4~Zʀ[5lp`8xb!Jjǟn2uAT>Ui_}e^C':Θ]w;B˾|r'ԏ": -RfIިU_SYZ};w:< :Pϡi-_ Mg j9Dn5>_ow:_&y{9 ZS^c:}y{!LzFN!>+߯1^Sg" g/1>JIfB:/ǣPFSԒVclu6ekHo_iUII]_u8YnAhf $&^r2Dմ!KZFk}G hIsqVg mF^YkcW,o9k Z>;ȑX MM:g_Q/!BJePՑaY~*bV#TFЙdO|53)&$DL&;i7_r&'OYBzWNbTBӪd"LeVWEfGEEî?=h)B9M;b+VTsdɑ;o#)IHݨ/^[M3+Hy#ߗxB8-~%46N1套Ϋ#JROi]|H{Λ ~Q+p|#y++H<}#) ^ٯD^#P iJ xFz!o(#=]kUGc?/Mi+'U귏ַ*iפ=}}{gJtgJF^zt'^?/i=Gi I?=^oer?^o)l/r+W?u5^, g~ M>?Xł_ʋhn~jأ> Pe:p`ݯ_ wWzr N!Z=}A#2tӵ_ftX7bzwl'mqNOȯtYxHOhdZј~u)w$jzZQ^'3Zl uvgݺeRQFZI$+wЯ0O~^z{7]w;⡇JYI(᫥:{Kyo~ =Ms9{q?ɟ) _5 ֖`3%2Hdg_g/<@hF55iV=WǣPFzB27WߔN4Ի{mP J6~&P}s qV%>[C'ލ4}$]aM>rlyJo{w:u~񁁁gφKBcRn./уIm"S'Ӥ{~E D K=Agʚ.Oy]]]!O7hWn.Qdt"(MB|;b LLt$Mh#(oӟ_Ssܚy=tKKK[[[___4ΝefX,IMdHxu S&LXLL!UqqwKM"Hhкv'\O^9h}򜕐600@BS`d8(fw m^M˷i?e߹N i]]]p.NdIh"Hha^Yky2,_i˚VnZ__o>yp.NdIh"Hh{GٱcǞ={ZZZHht L'L01 Muh;~|$44")0L2QLL3 ;6 r> $mٲn߾}mmm!xyyr6S5b8(fwt5B=4?WLMs<555=`'4隌w LL'L01񎘮FvlyJoK'o?x]PLhjWTl[Z%TLlYFL'L01Fi廎O)oYinM}ϪC[NgCCÎޝFU?hDMF q'5 D(D,hRBU,KNHFhm@G%m4d0Jbu0V0~>47/{sa '8 D TuH;4?jU_Ng7gkٳBJ!6n-Jvy.yNp0AB ?mn S9aw '7 hj~uŎz^Xю1ppـOQPh|>54͘U;qU'N}E,,Yݱz-~1Uc&YXю1ppـOQPh}}}%=s:k<+ -5m߾c#?!aҮ*[B[xcsy Ν[zrg\mh#WՅt(7=1VSSNA,,X%Y\mhBK3̎BNd2˖-[T gv3@qϼףZZZbmu(CiaKr}/>rl~ڨWMPhf Z mRɱ`v!*/r!|KQh3WTluDBBPh PhcŵBBPh Ph @)4BPhQhS+4@?ɁO0!Lf_ ;eܷ0V̳l6Zyw:ϸ#"4*)䬛bo oZ9X3~ve uZ%}=͈OlΟK~V̳X`֮]2s? 
iB.r줞 Rgҟx͒&$iB;,kK_?*Fg$̫}&iBuwYi)φ,M ;!"秝 VBBK.Yw( -wPqUzȲ7ZQyeUJU)*)i VM)4@e 6IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/images/sequence_create_and_launch_audit.png0000664000175000017500000010136100000000000027374 0ustar00zuulzuul00000000000000PNG  IHDRu] )tEXtcopyleftGenerated by http://plantuml.com09zTXtplantumlxO0ߗ?\|"Q u_l'6a]?vL`1}iݷd,u8vSnH%3&aN6\ { 'O$ O (t,"B<0eL v sX8~~ِKA"AFj{4Zm[p7g5:RMBl3͆PsKTGh2AJRT;k7g$;iԝyH tIDATx \TUAA- T Prl["eUELemm/Hi6w!ULH1ARЀ TnwggΌ8>}:s>{~sGs@U44P@eHgQA:LvOKe=9t y>= 4=hIJ+<:';Mg)ԍ11fߓ_W>%'?p1q&;?GS4 |2+T?B9ݥ3. ?.}cːKw=+ĩʓ555 K: Kg|%>E2M)!g4{4~W\\\QQARN¾W.6>,Ex& ['buw޽{wNNNiiiuu5 :8Z:c++W\nΝ;9t`vΨ+J}'h4]fnH] :֩rTLJR4{2كTn68u{|NEcժܾO鰧>9++d_'Kh„֬Yã@<4v[o >x%/IXT+\FJJիV:Mg_Mڀ'~ADƚ5k\tkݻ*bE+3***rrrvܹ~MGg&]|]ҏV^aXX"VЁir555999}Yz>`͚5R.C,̭txvhhh.--5?E>IIINV\m۶ݻwK 0fi~ 555 5߾}iii)׉BFFFNNNqqqEEX\F<MXZQQ{׉IdЩtFծyVXBR4_Q5mtaM+-Ng[ lIl @@e &鶋)tFMMͩSʎ;f5MW|b#bSb<(tP@eHg!T_6*C: 23ʐ*C: 23ʐ*C: 2*Hg\PR&p/4_B:N͗M43=qM`z[kt:Kjv`0]:^DAvc'qL:Q)TLJ/zA;}솆X$&ө+ Hn2ђIuc33v:>$DʧeCCCMMMk ?]r޽b& tQwv0Mtt} N(*-T7i:Lv~ayC9qԩͿ׈iL2LFɬΜΠW1@2X e\%"{LgK 9qر}{H|Z_e3#;=~ž}rrr+**:C+ Hs2ђIucݥ3e6o͐s{bǮ Ɖe6b奈7hQheffܹ믿Zuu59;R6+8`dRXd_WBD?w W eZ_]KXQh5kŠSQQActΎ N(*-T7W:Q 𺲪̐sRퟯ#C4(߁GU-HNNZff}JKKt1@2X e\%";Jg[!vs9EW񟆹 )m!攮Abo EFoDKV\nݺ;w+c ejʸ FK&ՍE8'*PŖ/rҭ/v :f  0^AR&`d"~]XT!Vs 7MښJ X_e~^x<,A#|\Lg|>v뎇_(aOHMmm ܠU7/կ޻9&nْ 鑑#zgSLӮ*tMXMyD⣕kڴǪ=bN5`@%ҥlytF5_`74ΰoz;\Iޤ3ݞjkb-q,bH^XaIKE>p#U7&nnݺ9ڵVƮ?~[KC׬Y(yQc'?*+ϱЌ#M )JZYÝՉTaX1 GjK: ;~`. oǰЂVk:/VF ҖHڿ}xкݎz;]+g#xc..=ʕoMzwР;wnsϚ5yƌ f*7o>KkkO71O=>eh={>UF|護fkmP@{&hbzG[DB:C $P&s1Ke+la1oᄏv@|߾}Uwޙ}g _}n(aaTCR999)?)766{?!X-ܝ!ΰPjFUL PI1+/aCN!fW7mZbHmOg(mANi!l:M_6LraѲ;;;[1H׏J#}o=bP3;T=РuXA޽s_dKXKf4iL:zz;">֋͢,z]wy97m)>Cw{FX:P-,&ƍqÆ*=z&*oltձkXOJIYd,o.a- .ĉO3q|if-Tr :X ₃z6>;yd,<错Mg;T:C҂_>wCB:jygo~3E^}߉ůH!iJK>p77Wi0ڵ١Vf&O51 b(HHg~v ן_OcOinN^.o 0t[*}:`c-,vny1gW;,"q,/Ou-`[3,]!c6IZMg~OJ'Hļcy晱VJȸ >Ҵ݀ia*i9f\]{~e1~w^q4uIg_EiFŇ[~˟KίHJaxx#BB_YK4YI|pXI[Wi7.[1j/H~ic-,PZEL.ZO򅍅lt-cWjI/4QOW_6gÿ\\z}.Jȸ >Ҵ݀iaTlژθޖ`c:Mq!MnQunnRog1>'ۜkkʟ}wypt*7iW [%&~C[oQm+1vq톿M(bO: 0t%1jC *M}~˫Nǎm#ltcWSӁ{$RVLKf:g 7=J[N!7k6#*Hv^I5IzzpjeKg}Do(aЭt Fl Q B>͞=#9CNߗ Ow|ee}Я- JO2w{#))zׅ {{8X_>{vmZ[o:F|S>f4SIgn-1nmGOJ> TiCDqbe?zθK8'Nh[ؿs>oq:C@)SF?Txy?Elfd\inYTYG!TlGN v̥bڽn_QtCZ 7E:7nI/~ٹXsݺ9l?ȣFZ;bڵKϵe=CCymb [>~;E=nկ6o^!-ۯ1xO?tL1G-to-Zo^aD!1u__cW| V O>.?>رV2v?WG'-J~)@\I(zG-WG60-S+Sj1aQ|J?C=aЭt(o˧6Π1@2Xn'?I`dRwoϭZ-hZ}&(U+L֭yyyVWb\%i;|x^oU \e{OgZVk錖;ȒQVz H+8`d))){O?.wFj/))iv# Fy>]+|AD:qLp,raKI Q6|?0`w9yJ߿n۷jժ:;;?#%%%F9Q㏇ ҽ{wwwiӦ>}hnnnnbJJ!iN||Nspp/"##{ճg &TWW]K" tEc) UђI錗_~y͚5{<>>>}م vexG []\\}QرcM?DO=Ԕ)SΞ=W_=F /CCCO8!4hPVVV}}g͚5c kmܸQӉ ֊E933Fy>]+AJ:q`dtƥKFQ>{]w%Ho=5n(((b-...錃WUh=zI:o5{,555:ZC7n[ (ϧ+z ) Uђ"__N2Eի׹s䷤hnn6L14-455MRȅ^{O>/8~B:CJH?㏻I+ڵٵ\]]E E1AQOW :@`\-=1n8'M9r B:lnB:øNo/^O]۲^{/^TڣttEc) `dRM:ԩSwqGMM?k6l߿׮]j5nݺyyy%$$%,_Z.pҴ=~źyq0f}W1@2X e\%".o߾5k$Äh8JKKc EPU0ZBc ܹ3==]DJ "E4h z HFcE'?`Z"})..Efff YD&Wm4Vtʸ FKe,RM:#o AEE؜}^iODf#GtN1@v0o֞[`dU˸tF&Gg bZmiii-b{"D4hD :i' +n ϸ FK\ۤ3tc iʐ@Ё0*C:A:œ@ȫ @Ё0*!TtP@ex(t 9W~A:œ@ȫ s!23tc iʐ@Ё0*ã@Fc/c?5Qw_fϺ\"1?B~/h”kB^6#gn2_٪ Ӵ&\qݲflc_WIIgvsfTW:C4׻+ @WId PTT٫W={N0Z2I_|yΜ9^^^...||roooV+ojj?ztF4[ kTecccttXcFߒ0!c1!\J[֯_?vXQ~zǎ%m;;{ᮮҍ]v5{qj/^h:V~).M oҥ@ssJX a}DᆮÕŰnJ`c=V7ڣ#lhQQQ{1bʕ+MҞͨ ,}[:(VIhV9"(CCCEcK-;t 0'ц9$$$5550ߌ q!'+.Ϝ9s|MhxK??R׽!free L/MqCǢt66(mTCCúuƌ3>;=Qf۞ΐ$ wg)%;;vEc2?s!O:NOslll~Dyҥ:nѢE[رcš[~~ e7atɒ%aaabÇK_uPFpppLL^/))ϙ3gȑLÍoȖcQ:Kiu lɓ' tKQf۞ΐ$ o7y Zl :@@ȓθYxԩS:$uV???'''//P\'//]t:WWWq5hIl߿{VJ jމ|q5%*n7d˱(ͥJ[!ٌ2t&V/|@۝ :tft 
0'tx+*K˺rp+hxEĕe?uNMA)'*/hOjet JnYMܬ@eq49e Mv Z}5HGx()K2-X`̘1䆇/\0*kHc:C)S\\INnAA[}}=1œ@ȫC>M3S-..xR̔5j /P\\xԩaÆ-Gzj4{HJJ2e j* uuUUU޽ԩSΝko9m4|]EFFիgϞ&L61::Z,xb/ҿGngѢEb 3g[ZxN`yDnݺ?߳gORRZmPPPAAXSS}||\]]Obٱcǐ!C.W݌žtfIA&M/`!1hР,g͚5c 錹s熅>;hEgg纺:H 0o<7n\EEƌ#D/XGǎ^ō1ٳӦM={:[X̰2}MKKkhh8q?/.v* mGtuf~:e׮]/^hwtt555skèÇwuu۵kW V+'VDj:3>w W[Z qP-Kgggït3́Ə߻wn۶M_\\<`Fc9'i:Q:)S>;cԩf>w\=lLg\̙3W\-**++ޝgŐ+!8qbrraMEA;CF5q1~~~`Tg6css-[ SKIIIm5s!O: ttFaaa>}_6/w zK.63uxzzzCCñc"##%cbbz`DD<Μ9#G~aPpssϗjժcD^]|M0)\d^ccÇ'OlJm'{NȨccYZVJS4=u"S[nn͇v +KZۣ)4GnO3,-k!)@1v Z}5`ox(Y&Ofus!O:8[AvP}#2}hL?jԨf5tppٳ}?ԩSf2::u[n _v-~P *t!!8qիEz);L{݈\o~sw.fa#&޿O>yQFK&%%F"Mh/_3g;#_zt:񲩩i>>>ӧOrbEEEzٳ {K)nt|rqjrss-뛗'-,1hfkhz5+lll{xx,^:Д.z!ؤkѢE⭙3gX^~ǎC ]bG_n:?l Jzm5q9?? iӦ.fa#=ܟ' u...6࿼[!!!ٳ|5nܸ e\\܈#JJJ2mAeeeן?~֬Y3f0{էeKKKsLLLppuEGl2qoYk(;wnXXXyyrhhu[ 1{,vdc-,&mVyY^o߾iii 'Nxm?_uuuK۱J ×J0f]v=.]zEe<==.q EEEBRtѼ MH:yǸMo^Ǐ_J'Oz{{nFәeuJeeTu7m4ydQuss~ҤIB 嗾GʢYZ0{,vdc-,&o*Fu] /Vk0lK:.`q'Sʃ-^ٿt钓bf7hOL@ȓhM @;.xE ŕ_.]ÇwuuwU\Ě]… ^^^e˖Cck(4l Qhq:UҎl 7+by?wܶm}vl-=.lIgHݧN*?WŞ={Z;o߾JF<==M0rqq 'i[KC\;𥟟CC+WkfaIII]$@ȓ4c`cǎ=;p%K766>|XϜ Vrss []ݖtҺKHHKtEnV111z$""B/NG)?{ZPڑM(ț7|Y\oݲe˝wiZjرcfccS:5fwaM3ϚٺJlba#Ǐkɿl"n͆3?f/Bt`l,uK.:5>>XSSSBB222[nsrr kŹmIg((}wJ:dYI?(QQQ)MZPڑM&b/؋׮]+v2dHVVK\G҄XgŔN]L:3$ͣF2|Wppp?A=h#WE瞻;Eݻ 'UXXfp+ 'ѮxD @Ё~h%t…F3ҲG_SPJ3 Վj,OݑMুDʧܬ}"Љ|cM`z =Bެ}"й|#M;?v0n :`]3ZH:1=zʉMbiœa!P+t sNCux׈ sY@Ёt{A1sY@Ёt nz;a0 W#:1ގA3 ՈG @@yZʐ*C: 2< A:œ@ȫ ? aN Utt 9W:tގh:N!@Сv 9)ma! RQtc_AB;Hg0oT4$r4=h4˗/jAAAN>]׋yyyRA6[TT٫W={N0l@~-XxїM4!b:_ihKo4Ǘ`i~\\܈#JJJΞ=;mڴٳg?11QzQ[[+˖-9sf U__Yf͘1raܹaaabˡ 3v  FuuuR߿P* BFh1~AJ~~~ˇ^۝7###44t1__ߕ+W9sʕ+_ B]]T4{D^^iA4N!O# ڣc(3,Yxa;&Wͤ_~ tRNh"zzz744;v,22RlpppLL^/))ϙ3gȑ׉De6[) i:EǸ|G\:Wkc:)!!߿{GGSNrUU(:ttw[nsrrۑ7j QQQlrI]\\f4N!O#@w3Q6ţ@tqB7n,S76Kz2>Hgn|;F\GhD.teɣ]RI 1v *Gf޼,3I2e ?|kI LJ[32F8h< AtWNt{S^{h`2?h|Zt57 ?B@С:t?<>!Hg КC_CђKji+?B@Ё0*ã@tc iC@eHg!TtP aN Uj@Ё0*C:A.FC@@ȷlN3=Fjk1yipKܭf2LHz=f̘lRFc?B.>`<"`AG:fRsy衇ڳIyMhxN ^655͟?uzJ_0o5rZ6(((77l.c&yyy'm(22W^={0aBuu766FGG=<</^lt-ЂtP[[ܞ t7n\EE2..nĈ%%%gϞ6mٳ-gL/ &^?Te$:::11QzQ[[+˖-9sf U__Yf͘1rΝV^^.j3Μ9o tRɓ70ܚxYYY)5ٚlڴiɢbŵ&Mikjjt:9rD*n:CַoR:uMY~lxݥKYKwíY^2fkr///QزeСCEY̑b(;;{ᮮ]v;V{E, 7prrjhh0#`tY=oܸtHg)%. I:\YYi-Hg($$$$55Uʆ7###44t1__ߕ+W9sʕ+_ôٚޝgzDF-`{/ ߿O8)}GG i< j :%K766>|Xf&fʋHg($66_~ tRNh"zzz744;v,22RެR̙3r~MȨ -[, >n׮]{ŋO<_Z?Btՠ|G\:Wk9Ԕ߽{wq!͏sqq jމmPINNSDJ:d٭[999yyyXyCCCTT/椤xDaʕJPFc9'i:z&;?;*63z!0o3t/}㧿xFГ~E+1`=1znvu݂e1@|;F?}"Ex rn1+KJu bJtyVKܩHgw}1錍.#2WNt 9 TΐnptFRشs 00򝄚*ߎc>\msO.h@sytFYZVm9=yWD&S R?LrXhg7vSj J9tWbc)hFSѨ"@'G/ݬ d4v:1h0TCfK(Zr\-'ha!h8:1?B^]Hg @@y!aN UF@Ё0򝍆&B: 23(P #5J.X`̘1pn .$sV6AUw[r-ZJ})..*W_PPV__O1@ȷN31ԘKt9V0; _v-A!:38:J錉'^N[4))iʔ)?h||4 pCc@ /kzh"󛚚ϟ:}t^ovfEFFms„ f/嗍R/^l c߿ѣG |roooVk|}}#ߪñP| B3#Rv1L/tǍWQQao^qȑ#Ώ1bDIIٳgM6{l7kРAYYYϟ5k֌3,_ϝ;7,,,44ZhEgg纺:?T[8q͟( b_=ze˖͜9VN3vzFDD+(&Ǐ7}wRȑ#VJ'Oz{{ݾU555:5صT͵zjRY̗/¦M&O, nnn+V\43iҤ[u8P) @:tFssZA*_x|ql.]ݾYÇwuuڵkrQk:(]ᄈ0{.\-[ :T48P_60`Hg3,{o ??Rf\̙3W\^rwUTVV //Z'N(?!+BHHHjjj`/͊322BCCoRڦ|;zQO>$Hg숛[~~k?32Feu%KĖ>,}AtgzzzCCñc"##Ucbbz}IIIDD<Μ9#G,NZWZ5vXtKHHKtE1|KiJT3fLΕ)A;qqq...Vpquuuww_dy~SS޽{@@@FF錭[MyyyȫiZooD_W`S XWWקO"ˇt999N媪*Q>t-<÷4N "Εlc|}z{1:U.\H;0wZueUeiYW.6B:AJeeG|@YZFΩ)(7t1,Y_x:Uo9*ܱ}#A[Fcٛ5h!~d4 @@Sw>,9K(ެA @Ё0o?ߧHs~D\HSC7kC.lͫ fbbbbjiˀ ]2<ëv IY ݣ_fq+/Vqv3v"23e'^ܾf y+u{0cQ:1mz![nǠ\؈j@Ё0o[ueU]o? @Ё0*C:A:œ@ȫ s!2< A:œ@ȫ ? TtP@eHgh+ 3N4<<|…Ur C@-)A:œkֺW^ӧOqqq8Vٸ77z7JYhܶ;T:1杶՘Kt9V0; _v24Um#Gqupa]5Y>>>>NNNG޺u[n](?C. 
N3@Ё0'ѹ'N\z5 w4eeliF{Hg|wnnnqqqNjbkqt{{{wϯ?}͛Geo~ڟ!t @@w爫2N`xիW/EY4|WWӧz7(22RlgϞ&L6{+lllxbogc=j˗KJVk|}}#ߪñP| fΨw1dц%WXafl'4[[]bԩs5oN6Ms LsOjo~ڟ!t @@w7t7n\EEѻoFxxxu#G:?..nĈ%%%gϞxg6} U__Yf͘1U +///++ 5*6Zٹp;Ǐ/--111A\'&&W=jkkEyٲe3gμU* ^ b2jƾ}5448q7m J0K^0"ߵk}Zc(^݄UUT)**2rwwwUP'l4ʕظl$ BnHgݤhxB]ze\pVK_+W6~Һw,+F^ګ4Kƍ78XC|j*11cǎ2.S_8pJ%aiءI&2*={׭[Yf7n4ݻ- f7a-P?#۷o7(^:kK  B3^ \l_. 999f,8ti7}YlBCCcbbBBn_/uVc:-M[wnE9dm͛jhW-l&dСw>|/%мyK.G9qDKu5d@s}I&իs 58p@ 'N 8P]SNӦM+((8y3D^D}9R)R;~z]\Mh?HgZDD_%+޾ӧOО~ WVUV36l$PDv)P1AttѣG)?Lٽ{t: rL:\ ";hA:^QZ5xxx8 A.tt T 4sP q錀raG \ HT 4s 3fȔOOln A:@:D xvGXXXll2HgHg]:l +3eΝ;eΰ;@@3Jg r3gV4:P1)%9RS] Dv)P1AN:@I 4:P1)}Q(a"F*98/J:%B T 4spt_tnnlCB<辨.uWwՖ}.q%Ig}Kt-3G{B3#!/###*Ud^3fL:ux3gZ~ƍ>QF/bAA:~eUj߿nnv6^S6=k,h_ub8… <==;tpA9ciKL:+@͜Hg/;;O>w􈈈=z3ɓ'- @F뱴C&aw сfN3̾,**zW}}}O=ƍVZV㭦36l$ke9<ءCOOπ =TEٴMnݹ7'XMg"tθoܸUt}Q3Ə٣Gzt8"pt_錹s=Z7t8"pt_~lrS(h?( V4:P1)}QgNgh<SHgсfN(ݽr}ΐ@͜szuJxx:^TT#󔤧]q;*UTVm۾VL8s"F*98/Z3'=Xe+WU^FEEɻ=v[z PF o6h@9G*VhtbS@EKΈ۷2~zY_ٳ+#Ǐ8p`:ujժտ%azk&O5{lWp€OO:QF/bAA:dd_J&ds#F^RFlҮ];7ŋǍ2^blIgXZVV>f)d̙3 G$+b:PAW^}Ϟ=+'Nٳ;#gΜ_+[NNNrŋǎ;j(=?044w^^yu:tM֩S'ezDDD=N<)31b„ ζ%Ap)IZ+eD}ڵz^V/;y䰰;uMlLg]ӧO)MgUIt7nܜ9sdK.J?yƌ2t|???=M^ (ㅅիWWƃ;/Z+e䡇?ӧ;ۤIÇ+㲒@2Z{gXZVuQe\6j6qW%I:Pm۶-..nٲl޼^ iiiݻwVUT1_,**A/7nկ\:͛7mLg:u^^ٳ'<(5?fmMgUIth۶^={v6mw{ׯ_#mt{f͚i\a2((]<5i? m٫7o&&&[ROgru]dELgpW$F*9TtƜ9s֭b nm͚5(O81p@#su>(44T1{ s ŋ80x`{7nM,P{ raY$11QfI{gt߷_:C;W^3,xELg*F*9Ttƹs<<<.:Ne dՎtDDիwddv'ƍjժj*>>ޖtTR͚5۴i;(w0^^^Zӳ]vFʓMdDቍ K0W^>}zO Z:phtbS@9Og߿qƎB3@@3!Q?>'''33Go# 4:P1)qHg's=ztaa#@͜tJ V3P*D:h׮]D* @3"pm۶Mȑ#Sڿg1en F*9Ԅ0PVI*$Iӹd:gV4:P1)大'%%It~çI#UH*Rvv6 4:P1)222v%ф5xB&F*T!H3@@3GΖhzzzjjj~:!66Ry IE'F*98tAsss/2{ݮ ʗ׈)rqjjjzzzFFFvvv~~>Q1+h v|U-\.Y̙3S:a:dI@wfjDBBBRRҮ]$R+bF`XJ\ђb}۶m,YҦM'QD;#***..NZzzzvv6Q1+h v|U-\.Y˒;wV2n֗_~٤IM~gj@F,XиqUj*55u͚5^z.]N>X lBBF@R_~ߥN_uñM>TbxӦE&ժUUFXc 46giHgKW "#* ӬI/ NA.6ի<د_hT7n)=qtաC+әe^[o .'Lqv[HȞ4i7Vƅ3)W9s >hL2qF[z:wޛ7o.((-XGU7idӦMoUVծ]ݻw7o\}huQbx+P0ʿs[K9kscS:vl:V1//eÆOD:T]b(C۶A_~9MnK*Ϟ=Qs3@vٹ2"qԩobbfJWǥUЈ]n]l819yQ-(H;x)[Kڵ 6Mښ@KxG=z@Xc3) ̊|Cmڴo&52ROglذk׮0`ƍWRE}Pz۾}Uϟ?_}h)p@wwwPQ~yO>A})'C Njٲ:neر{ԩSV2ܹdӴH|Gi^Mh+W~$V2׷ΈO=[M4>Wԥ""@JƆwJ:~׭2ݻsJ@-Uӧy3@vd 5mkt奍;lv6eN)Mp˩,-|xWѾ}՟X=@;34bתU3|21*c RseVKP35̆={D~Rnme\ΓvZixQRތOϪIgةVnŐs9fCӋ63н4tݎlfۚtV^vXc6[u"v}D{!;o:#a5v^z>dH虆"߉w)jj@ߺKA:tb$SLjvUgرPuzllDNmXC.{)t=5 |Fٳ'Py?EDEFfPF)2t c+ a3LpYMQR%.#<ʹ hŐ{Y})gꗱUZG`ĉ/*!BZs 6lyy)l5QNmE$ȱKggoV8@{3c?6m>qb 2j:f@K|۷˟j:R1W)j5vRͪ3x{2<ӖqbHm:d+Fal:tP>*/+kիiW`YAN9 t]bjP[V t;}7q5fΰ ̜9nР09V;6ht-+&fk=??M [/;kkKH\4 e'ͶY%9^{(8vTfĬ8[t^[]$'F.}o5ݫϾ%luv6 Ԯ+ٳIk4|6N:ngݦonyW;li67T(+#H90aīTk񗂂ԴUkҤ7S+9yQÆ-y4|gu^dZn˗w˧l5^c|CKٸz:KK'e+q.|w & =lX3g%yM#q/`jNZj*Qé@HH'+%d䯌U:]ҏ'=[!$R&~rHg*B_.Brڤ f^j>jU g潣FkgKRᔶmdF|[zv5FeDB-_;Ly58lkawِt';JmY#[ Ni0ÔNZjSژи.9sޖC=acvt#kk>!mr0Fdc3n|d4PA|EHgP28 V8=}~* .=ر48Q"Kg\p)~o9#޸qu#ziְ2Ar~D R+HY&ldl]Q+tqDKr<;Ǹw/[n%K,_<666%%E5QXl FKENgP+28&VWAdpXl4F;q͏ }ώ!Dy¦328&VWAdpXt¬kkvվƍS\l% /۶GsyOWO|G l"iQQQ ",j4RG >P* .ܜlx[ǘ$o[~;g)f111IzzzRRR\\e0 "Ţ>Z*hS(qDKJ,rsΊiYTm;M:Mn1ϏC[صkۄ50 ""#ET`DbF GƊ WA"7Y~H)11_.Sr+VA(7JMNMMM))))|jNU-*yDz6}'[v YjP_?^JMdDEE) ҌY &877WNˀ))))"^O4R)VT %\%9o:CX^yW'D }KX+Vƪ?!P9t#߻}u7WXv,Z!23fP9u:Ct-c>v N\\ܒ%KCF-[qƔ%!3si垛^h-c>HMM]vm2oxwrTn._y)))ʝNedΝܦg94u!9tL[0LguPq:n!;:\(Ǹ:׸wrrő#qtF~~s粲N8ah+㉿wYJVȍB(gHgC:Ő.'C:Ő.tp13!\ bHgC:Ÿ@:+v ykIgP9{: {t3v S4+2#@E錌|x}+6ܾFJRHKK|T(Λ8x[ȩRM^p;Q.۱cGZZL$@iGr/o8w\VV?)鑲v}ZZL$@D2Ee8q"uq!˻19E&* >K*Kge6\29cǎlmO'2~R2\@\錛׮'v kg&j.C{RON + ӣB):{j.C\ئuϐD ‰.v Y[~O\P:r}tԎԷhP9K:#s7xv~P.0ePfHd*E%"pt٭{bv gnِPRgOn?.6GFrGu^=B=͹ :5|{hPq:(n?ruǠ\NO(_7O:E(8] n![:Vj˰%JF̡_tpO׾x=;3<<_fgUTD @"D Φ8)wIJ^Bl3F ._,͜qףvAZ{6uB#>@I?96+T}ËsOH w<ku𰓩׮}%l$&y$XG .v!s`]ЎQ/_^@y]ck?K/<]pm F vM 
[binary PNG data omitted: remainder of the preceding PlantUML-generated sequence diagram]
python_watcher-14.0.0/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png
[binary PNG data omitted: sequence diagram generated by PlantUML, http://plantuml.sourceforge.net]
python_watcher-14.0.0/doc/source/images/sequence_launch_action_plan.png
[binary PNG data omitted: sequence diagram generated by PlantUML, http://plantuml.com]
python_watcher-14.0.0/doc/source/images/sequence_launch_action_plan_in_applier.png
[binary PNG data omitted: sequence diagram generated by PlantUML, http://plantuml.sourceforge.net]
python_watcher-14.0.0/doc/source/images/sequence_overview_watcher_usage.png
[binary PNG data omitted: sequence diagram generated by PlantUML, http://plantuml.sourceforge.net]
python_watcher-14.0.0/doc/source/images/sequence_trigger_audit_in_decision_engine.png
[binary PNG data omitted: sequence diagram generated by PlantUML, http://plantuml.sourceforge.net; truncated at end of section]
2#9fQ׋Q}ݻo"77Y(K(!աO'Ferܯ;ll5Yn{BLx9HQ3LY_1c#j&@kxBKv@S77/0USusj+b4k_mL}SrzfS|{n(j0L],w5cyr;i0)j0L˛L,G#""w.L}3Qb9m)&KIII}U¹kzyyuWnwuԩe˖]vMLLܼys.]ZnSOeffdcٲeK=Zj:q˗/?6{ܫMYncc#7BBB탃 >J{{{k?AќJD,0Ek,5k֭[eޓyu}:v߼yСC1[ӧ7|駟Vo1B??:BNNǍDЫW}F2x[nnݺ%$$_~}ƌ'O6]vIq+qqq703O C5<$aH.Ijv1111[B2/ҹ))]?8qn쬳}ݗm`boH6>}kt*703O C5V͛7n_]ޥo5޹sG;*ԟ0ՙsmW^׿e"+8wܨQ\\\ϫ|k%703O C5>7eeMrD,'Cw}Vzy.\uVIIg48h ?%@a"S` _t/((Pȼ,?W!L eEo6\{͛7ÇگEC h20L,}SrztT(E0d d4v&LY(K0߻wKz,2d ֭[׫W/e׾}lGaaaEEE}C=ޫ]v˫P_%8hGs&o_r&Pa*h޽g}P?`]HX߯_?FӶmQFsN:ڪ?'Nٳe˖{u%֯_ݢE Hc@#T~Ϗ+Ӿ ~.Sf޾),zuT(E0d Dĸ[niii, wT(E0K b9д%%%庸@cjXbyE PbyBE Pb9E Pb9@P r?@j͚Ye-u W\V}Xx,@UA,vh6X"YCE~޺~Ͷ\o ԪΖ -9充uޝX^[{%J2NIIQ7nܨY.3iii!!!NNNJ&Q)ߺukWV===5_rrر/XXX/Fg' 7ut֬Y*Ɍ4|ʕӧO/++٠2Amѣ mذؾP'tǎ#FVzaݺkmc!_[.3m4yvyVRKcJo@,e6֭['3wߍ7dD#֭[BBBqqg̘1yd1fҥgϞzٳu$x/Y_Y:`LYyĉsQgsrrwn{^Kr+…S*ŋu6hq nP[%I=/lpMl_Lofԩ΁7nyfchXcZcES^ҰbRP[@jˍ]O>3f̼;...ʙwqqq:k1;w?K)*EEE[V}}}Ϝ9_xS]ܹsU0ozyy:uJG)0\]_87 駟L칉k7G]_}9=خ]k},~/ X9J1`͛dk׮{ӧǎ tttT>lkkk0h43#'OD7o\]Ν;wnTyWYe$3c0?~<((ٹK.{5k7_O2Ee6mR/а k,0]k},yg7nؿiSl9c4xB#>F1e766VrĹ+Wܾ}[Uӈ׿%[r'++Ȥn|^}3礮H/3+󃢤={s3U|۷o>|äI[ k,0uk+麨HN<[riĈ;vPW Pby5;ڵkW\%1&&FFFFFHHF\\\RSS-,[LcW׬Y3p@yxYY?|t2Wgԕϟ~:00ު24Hhe^ǎ+1OZ@by y /^ ֭[6X^w_dIaaaffȑ#Օ-{bV[WbgoShhZ%rHJJjѢťK~ -?yrW||v5HsppPoϛ7O򼣣yB6۪U]V7ud̙eFLX\\6>uT +smG 9@,P<ھ$~;4}9yXW,@cH%+ST'OSrc?@-cdDC X9J1`4Kء髤 sΦ)@ >F#=*}_S%P LݮOjHJ1`EK %ɝHJ1`E@(K b9ǥ@P Å@P r?@jr)@ >~i-j>2/[la :tŊfA.-PM:0óm6==XcOvqq)..0 ,**ڵk7iҤ˗/k߻j*[[[W}mjj5jS˖-{cQStz͛79V6^[/ء7v|efw13ʚ666>oy%؈͝6mZǎ ;tm۶QXb2s…ѣG?^SN^^^2o:K[zKKK曐bJ>jl7}Y>nܸ{X,Wf O8ꫯv!++K5=Z\\|ݻwKW׌SXcsvvVoٳgϞ2ӻwo3ÛK# .LKK0doo,uւ <<<$gtu6XZZ:k֬vdFn+_^RFKNN6,+Wtuu>}zYYγUcҩSgϪ?p@=dYsÆ __ر/(.X~5ӧO*9d'N_F^z?O&9==ۛrVˇ \yRMjw˻u떐P\\|3fL&LP/kd3Q&2d-?vX``򌶶jb2#7MolMoخ{N:Ǐ9;;we޽kSHL>L͛77?<ϖ{ȑ#?Wu}b-rzz{!@,*uL?~w˽7mtʕ۷o˿:]ϖܠՊFdF#vس=zƍ:rΝ={yEƞBg5#9-\䡇q?b s5Y}NBjX,6K1b¡CjK+K .--okp%vBLL<<##ClٲYrT Ο?_ny``[oUX>h 2uӻjY6oެݤ-%I,СWd)tV[f^Y"[ƌcߘbڵ_xQQщ'f͚eJ&6r9yz%v9ܻw~÷l)^?@-9^zV%J;88lR֬yxx딗ϛ7Q044TK?̜9S̨g˕+3N:UrwسHXm۶ m۶5M= "cOZEE۪U]},7rŝ;wmڴ޽o\7sG_z% -Zpvvu/w P Ejio(/hIXB{IQv^vt2J ?P X! )@ >~)@ >F(R} ?@-9(R@f\>UE Xa O`&3̂b9h\wOjkkGv&Mte*uܹcpg5mll}7|ҥKW3;mڴ;9::2$>> @,X\paǏ7}Y>nܸ{X,Wf O8ꫯv!++K5=Z\\|ݻwKԓ'''{{|e[,XzN+--5kVJ2#7֯_/O%''N+Wtuu>}zYYγUs=zd 6|9ZEEoݱcGGG_|QRq  //ٹX~5ӧO*9^'N_F^z?OTX@֭[BBBqqg̘1ydeҥKΞ={ٳgM ̮Կ +eeeIv]d*PIf/^,v՜gi߾}tttii_~e/W 0`@ff9s樫t7\=tP^˫ fl#,iiiTb9(RJ]ܹigcԩSʼ<[]!77W/**jݺXn$%%E{#w՜gy'/=s2EOOOsg;999c+:9m\R nE{_ i(RnKرcJUk43c2#7Mo,ވ]5Y?ܥK{\K׎͛77?+\]]'L^rQVVc9rΖoX,77775 b9E Ԥ7mtʕ۷o˿j$`~3qZ\݈oخ,wٳgIocl:WV39;\䡇q?b s%g@,@ VQcbbJKK322BBBHlٲYrT6|偁oVbAˌM憎gQǎ+Lby cYm͚5bel~3fok"kJEEE'N5k+ȹsQX޽XΟ9ZE _ yxx|޼yaaaPWRR2sLJ2~\<ԩS/nzW͉۶mj4=z$$$|9ƶZEE۪U]},7rŝ;wmڴ޽o\7sG_z% -ZpvvNLLT-ˎN]R0˟9Z 7v\ZHJn7X#1fbjX,>)@d'"=ߟos4mcFwhJ2qU<P2VQ͟>=NjX@#GQ@,HJu(;//; Q@,o\@cjXbyE PbyBE Pb9E Pb9@P r?@j˹`EڬY3c M{vM4O5M_~eu;wsΕ}X],`ÐX^u.\0z?gٻw͛7^yÇ+={fV 6^ˍ#޸q^'%+2oz9@,@,XSO)K.988^XN,7u򜜜L<'ݓ͛7///Werb9,Hklu„ .]2L~au g)@ \":| n9;hZ sbyPTׯ7xucN>FX޺Y|O˯hX 7o\䯿C7|Sw .;P h2EzX)Ķ$ ަ1$c9\޺Y|]O{]K`U%hZErMrz|G~q#"%ruK==qNLԹ<eO 2E5G\ 2|y-uXf~ -TOǺ .8EM;s.h*Ez:~{>\쭥?VU}Xf,B@+_O<9P h"UNCHm @jr SYJ[Y~mަ1@,@cjXn6.-P%hZ b9r@,X^g@cjX`?@cjX@c?@-X"(@IC$3?ïٯ- ۷ozj Pb~GJaaarSvmٲ>s@jXn&Lh"% .8qv,߿g}FkSvΞ=DnJWcytt>xqzX^lmmKJJhB2sNX^\]]M-?tazXnX/-0n8O0A'|(!/Rc?@-˛_̙3m۶ S.7Ҵc/#""@=)@ 1po,>n2J>y䯯_we:Zn0FcjXnr,ؔwb݆9J1@,fwlܵep%hZ K (ǣݦdrQ)@ 1`ig1\d+ϓ:}[<m_ 2E5x+m<7F,9 & * Pb9`(>F(R} ?@-(R} ?@-b9 @,3\ZHJ1@,o0\H&Z͚_y{r/gٲeÇWZì3C]bu͟9Z shXby#9m۶MOOWZì'U>}ť@-9c4@,o k7o9rE<[s m۶Qb9`j=zG}D,7n͟9Z K )DK@:iii!!!NNNzSf֯_hnZ`իMl=r3-:WZZ:k֬vdFn`Od+WL>F3,:u:{EV-=.dzƴiI奭ZJ{toooJ?s@,O֭[BBBqqg̘1y*SPPPVVVaa%KK. 
yٳg^ c{blwOb+…5YY6r,^F3,[.**WZ+|ѢE^)/M{}y:iLXN,VPP^evU%jsL\%=1e7N:ˣ3YYHJJFl4ϢhcZ/Xr,9;v,00Qm2$ϒ3AZ[^Yff D56ZN~Gkxu^·ؽ_ `9{ӦMW\}Uf.] c{bp666lrª߈]5,G޸q5RmƺP]KlyJJ%ƎKW\ZH&Zjqss)-- Q/Y033sȑUe˖F^A]\\RSS<|ׁoVAo\ˌ׶M缾gټy#,*}LYBUv {%Q{Ç߲e ͟9Z )DK@:>>>vvvd???Fnݺ*gyyyj-:88UII̙3ˌYUSNվȹ]5,EEEm۶MKKW1XknL2EvIvLJgΜqqq)..-ˎN]RƟ9|r? PVF~*|Æ WZ}LpŊV %Ǻ =ڂY8;b9`(b9>V߾/Q~ SG7xb9HJ&QP_ˋuJf#D6;ߘO@,E Pye#Ͼns`,"Ш)晘,fi;'^uw8b9Pqj,j}_w3$2XS&}=sf@,#o#pi"(P|=OWyzb9(R} (;//;9=NP%hZ sP%P HJ1@,?\ZHJ1@,rXpi"(ڿYx;]?R .^βeˆn vfС+V`,"(R} -,7SXXضmtuӧO39J85X͛79^i]СCmXD,_(>F,'Wѣ?#bLDDĸq~%,'899 fMY~FKNNVߺukW6ADʹ^iiYUY=W\*;3}2tٳJMd{`lƴiI奭ZJ{tooo"byqi"(_:ݺuKHH(..~3&O\ev *,,\d|ҥ>^:{l+dlOmI8p`v/\{"+F.TŋhuEEEJk%/ZH+i/O'XD,iCR.((pww2;*5ՌԹsgSV۲^^^NRQj퉬n$%%E{#U6gh4ڱ܂_ir V[r1cǎ:::*YYRRbf03؞Tk+ˌܬȽ7RFԩӏ?h VbKK,,'{{{oڴʕ+oߖz777LեKdlO nMCVXU7"31ƞe7nWXki-OIIѹرc_b9`9-&&4###$$D]dɒ̑#GVǖ-[ 1{[vqqIMMo\[5  Rq-3_6ƞe#FW=o Uٵ,XF>|˖-/ڸ@chlFx;;;puyrrF\n]y|޼yaaa5CCCd̙eF,tuêr}ryƩSj_{mۦY+՞7օZSL]Ӿ3g\\\ƅ@ߔFjˇ f HXBgaQv^vt2jXxP}'J]t@,_(>FW5=Xn6OHoO  EyaqjX"(@"(hlnEx /P%h4 `/^l K )@ >M:ͦL_^0[~,'@l6eU&LXh/BbbbRRRzzzNN,'oٲSN͛7 7X>z踸=zT9,'9[hcypp֭[cbb$'%%5Xe6@b>Z|1򠠠7J2KLLl(R} ?!+1{ʕ<,rʚ5k~{sΫV%+.9s7 f|7mڴ}P%h4X>~s)KV^-Kƌ#K^{5 ϳj&P%hXx,?}D}geeɼOr29HJXUV:VXN&'l)@ >===eɩSodrb9˯._\5駟$߿c9X۰aC=4p;wi,'Jcy#XN&'72\fHJ1hұo߾o6zdrbQ(@cѤcySA&'3_(>Fâ $/n rB+%hX`6KSˉ(R} ? W(R} ? j,h-_ ^cm>Eln6B,ˉg\j 9\vM4ˍň@,X^QQѩSp///oX\pA2 l)@ >BBB탃MCY~FKNNVߺukW6={Sfz.f3-:;VZZ:k֬vdFn/6//YgV2Ul|HEEoݱcGGG_|c9?JP%hXC,֭[BBBqqg̘1y*cyPPPVVD%K+˗.]p٫WΞ=: LDD̫ˍmI8p`v/\}2h%7^=1dffJL8qΜ9r)@ >M2k+((pww2*EEE[V;wleddȼ+6MoM//SN)(oooAHNNJ&ZLxV6__3g(/^$HJ1hرcgmmmk4%Zk믿^mj,3r}Pu„ .]YT3ԛϵ[yr)@ >M2{{{K~r۷_[TTV-tbmrz<++K]<7M4qaXVXnn4C%P%h4XSZZdɒB#G2[.[, @6bU#FY8tPIƶ)=55UW[[ob,7Jˍ:k֬+kcƌXux;;;p5%''i4OOuU-͛ffի޽{u6CCCd̙ʕeF@{rcTثӹYQQ!۪U]rXɡ'r XDc9(R} ? 7~HJ1 UV+b9C!c?˭$P%h4X~%WWRuIPP:_RR"Mܻj*[[[XP~g_ryͭ[ʽwnk7WTTt)<D9h WfBBB탃L[,Xzj5_S%''ر/XXXnccӳgOݻw||v^rʪ\3S!*o>//OΝ+Q7ސ .ɽ2߭[ׯϘ1c:VtҀg^zu:AAAYYYt,Y, 0`@ffCyX>p dF}&lb֬Y++K<>>Gzxxk_&AY^^>orrXb9l֯_E2rЉ(b9|@cѐ> /HH   h"~ E PGu֍MnDMAC/X"(@$%% ??_YVVVzSwtԈn9r4X!@,X庸@cjX`Q "(@o^*?yG(K4kP &nZnٿ i+^I* Z"ñEKӞc13y:sp9|*K.M˪kM&iٙe?S`Nm֝?(S"ǞAr(fb0 T(2Es{{'-<N3h'Brg !3P9Ljfk5BsF6͠} )@,g8@2)S4gA6ճսOjNSorJVJKK6#fʤLќaYν-rssu0,A5w9'8...%%%33SoQQÆ`qr@Ԕ)3 [n~~~u0A5-ߞ#7''G1lLF*IM9 Pep.W^ǧaÆ;vԬ+ ˖-Сk׮]Zuxww~}X6nؽ{F}ƍ;w6_}Ո#>{\OY]~}xQFyxx4klȑ?EJc+VwP5w͚5R 3lLF*IM9 ˧M&{/ K.eu}rrrwUVV֮];-r[oբE\8lذs)))N3fLPP6Ń\@{:uJ?~ʔ)Ǐ7z-[Hpe9ZFKxoߵkFFFʼe0=T(2Esd.WVVvE,K$n۶QfW%+r+ҫ ӳg[jeMӦM }X~aT[G_?Ts/LFXΤetgcٳgQRR^o5޸qC?*\pua[]5k +Ϝ9c&+DP|7^^^4h`V-[,Ts/LFXΤeÇ[֨QL,7r}2vÇ,X0zh!!!Oz˗MFyvm_8 Sbã0I2ʔ5{X]X^XXؼysteY#";U_솲r5SqeeeʲFQ5U>}bcc)j}f͟/رmΝ}5rm֬A-6L/|6cS@ZHL77Wi25x;zR` 8^Tǀ{w_ϲ՞{,75qzZucYxKJv/c۵kݰ1c{}F=i?p\Nu"gj2k-Z4vXF$3fX>dȐ۷_xQ~ݲez쩬INNV_opWݻw tz!Z[mٲE_X?;'?'Q/idLfɒ/֝j~>}^~٣G**kڗXjPZn^z߃FL&=G,w ޡSED̗l䓽eHLNڧ_⡇:EJ<+}+?_yex3Ձk=F,)3>cߎ; V WZ%<'&&300P:4h@4ѣ$%KJ,_ťQ8`G<>~ԋR/Kꚑ#$$,'bFzãyfeeaajGjHQ+ ʹnq**rrO>TYy@Upԯ_`Ν2bĀjfb9ѷ) -;{7FM ov=NU!h<E+wfʖ;mEJΪ_d&[h{ytumأGM>P,73q6n{WiC H9s&>t?;5gt\]wV]cԔ)G+f˜ َ9{W5xQۼy))WdhT)׮>/ڇ~ %K2JJvO#=ȟ{[|^9{V7ge_34(hr՞="~'0oIZ1v혞XZrҦM#)By2Dr9";U4zfJTa3ii+eSSWȹ{ro"r?b}Z|w(8rL@: o|e-vv^mp?!X}f-4qhr3T72)SWT1BwX%JL\* [gW+0={?)HVV;[j,ˉHfMPr+Ƞy ;K˲̅m5U_,bG匟XT聖SwFa3IWmܿ#oҴicۘyw"[86}^x2<|~ȟ5jǚ}[:ujXnԔ)+f˜`f;F,T gL$ 3g,,X0]y,(k'j߯j.&F?FyIg 7i~^mzM,ǝv:\dWՋ$dkC__Y^V*!;O4՗I[f6+.֨I5hn&2L0j͚tm<;OE?ɂ9ycy&wY}ģG ,rSȤL9_1SU61b-pةk R730SlG ʻf=l @y>Sw)T57FrL[yކVNU4⭈NR!^woJVLC"ϟ6z-Qܚ1Ҥ^s_ĮƟu̎-VUIMrb651b \-+?:ge0G1o\Ɣhi׮R?ի@ G4n~^ey׮Uj9sqi -5ռon˖ͮn|l2q =ҼyeeY#THWnZ6Yב>x7oyy2\->@r+'YmiWʩWF?w_e,$X~zaIMrb*lR?UsR Ѿ0y=DYK3jPͫ?)p?)3M0J]ӽ{7.^8Qb||lmܝ'*;v|<4tSfy#r%X 5v7BBfJY|+ ^%屠 YNT 
ҬaSU%o2dH/X&Anoy,đ%WᓾgC6%%k4d̘O!y>IMrb*lR?ÁcQMKAĄ&u.ƶJ2{<)Pi LwwoGZVVX\]J\MEE-3 ~[_˖߷j{4)<ڸRYiMѼt)&t^~WӦԧfbM?o_nR(/Q&S!v+}ƍ~aS²e}}]\1s9O->@rL̜<:$|ݎ|J'wK,| ZӍLjʔ#3Ua_e3Lby U\-}1nʃF;BF/6~_pPi0iLF ԟ~֭[ʤeʆ\vTbyԀq2-,ݪ砚8}aӘby[bvZ1A";u,v &ToF/**"jΩ/B,gR2E,'א.aN +{9I~ҹJ2/--ejΩ/@,gR2E#ĉ|^N=Q8uNttu:OjN 3LjPh8:gڑ>3ϐ׽cbb4t1OjN 3LjPh8Zófe/k_$onhhhXXXRRRNNNQQTs//F,gR2E!W[ATZڠ儎Oa.XPO,Y@3h-0rF T(2EsT羷|?K=|&^ypGovGDDP՜fqg 8y,9@2)S4gAu._*8|uwK&gp6 we)-9deo[7kTO-tep)qeRSh0]rFKo9f/ޚ0f&w1dr^gj֠ ") T(2GAdqۥWڠ2L(}~N^r`GK"n#rrrRRRdDX =!-9E?SpIMϠzuv><|zd>o pxzbJ&@ifffJ")aH8@uʤLgPݍ奥ewD.$o::soTDq%+Ok4ܐdhnΑ.,0 T'?LjxX^QQ!]c3$o19%<::Z}Aܐ!eHF~~~~.HHHHQLFCP&5e @,'@,XN,X,>@,'@,'rˉ  @,'@,XneIXW=#*\iYS>!?!c<&a,~{1@,rYrxK='|μA+8p Xr·Eߥ]v3otmzzrYI2mǢ凫L^XXXPPTY&*^b$6s1Ll롒3U֨eHbyMRߟm&+R7;_vWwj(= Շf.>$OO{HbU.w.ͯ:K=맔~{&c̿/%>O\:r/3-=ld_fɭu%>zrț6v;O&ϗd~s?r뿐\ɴ;ZIcmW[w͞iX~[,WKWjE NpXYVy-jdnYbylxWr  vndIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/images/sequence_launch_action_plan_in_applier.png0000664000175000017500000012231600000000000030615 0ustar00zuulzuul00000000000000PNG  IHDR#k5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gU3zTXtplantumlxTKo@[?H12iHT@yWKhm{2zI}w\D.13sSWqũv_&p*xQNUZ3-hR̓ ^ך5Lc,EWj|3/ɏwX/õ1!/ *TSAvmqtN/)Gە_Zqb)XJȂhU."\-cPq{'d*o 1 xX(LV `"4D$L5<4" Z(% )Bpg%F|@Z.Oʚ{}ՊSlKdƠ3):r#1y$#7PkbbY9)[S*T۵{inv+^ڤ4/+cIκ~ケKJL[< T05LL͆ Y;eCog9p7Nk{5@1ǦHr5e̽`ڇlB_d\IBA9\K&LM FϏޢt\?%tEvIDATx\\8ERtJܹSMY,^Ei]6Huh˯mts1OdSDԈc$+yD]?iLX$It:"әs`33c<Μ9s~}s,Z! rԧ$A}Q'QOX& )k۲n^|f$/$7*3@IDf]g_hKۛ߿7$}Ӿܨ'}N_KmI$auEFe?sH}@_?y3gFGGi>h_InTf3E^۟|C9rD^aZ)7uL>DڶgϞk&APR އ:ɍ &|C w]R|.;!< *"Z~DJKcXSz?Yll65+R% y1}~ժae^ya̟Ϫ"GJUSO~VAyS\򯣾m9 W&o|7R]>'՜",ّaS]Ҿoԯ7yIRPa֮…WH&QTTI']lЛ?kL;:?Y<]{:. 2lrnׇSR\66\KUs[eNŒdlI?RgjNy KKתVQUeO{O4d17*#ۤ/ϹSdȨ[wSGE3j_|=t͇" Fc?Ұ.)WM,b)o f;He:QH^WzѰk>$'!fm,O22QYYblЛbӦ\92v\rs?N˄k7J=D!跿]ߢ&+>#uERɩIKU{?w H"iiK2]_~P)uuffx{d&Kj6n,VɸM;'+nNJ{g[VowJJ߲kY2nfSTLX?61Ѿo4|H9iX̬=Mb@\' 3He]ӣ֫>D 퓙<6̔^΢c9I5kU|RV;/wUU6G~{.q gN#x%4SO?jp.~ٰ_ׯ/˲/Ʉ aaUZw =wa>uiu…W97yCdezGan/pZ~uCa++&ىji)3{k0+bf)fo ;He=QM"իs}ȡC;.+|9ˌFΆO<idϟzIr< o8ꏵK" ߾n9"̖߼jqK ծ`:=uGҥ6jT_3w߽? d'XU`h`u[~S՗!,[<Ѿb(Z% `U`)X HD!qB}Xlp?~sډ\uجcvl6MDQVy?v9(r᧞dΊw.]PJ q]b:^+G IGEWV.05f$t&6/&n1I++&"NeO=,bf)fo ;He Qȭ_uD}%wLy âc~G Wz1--U!9zHM7Uje7W1QHS }!zoڷTOMbS>ݴj|Og'?yD? (aC7yS;=NԦO=ʹ`XxWf/Y`,*S,v $F~A:LfU߇ȴͪ\y!6;#JS>Yn2G;n1צ濫3G,ud"U$J.Rxժj =FBBc7 z׾v zOk7m;jt4ڗB O_岳?ӧUWY ڑG=vVonM˽?vOî6{h}0Q] {wRnJu6ffO1{SA*3Bf'Gٖi>_l{v~غc91yfumUۃ;',t1D ٳop.?o70w_ydP"UH;7ݤOIO,H/#UKFf?wUۻ뿦S֖9mItZ\w>-f%jSP:M) X^0W EUK}t^1 lE6{śRIgG#=V߰>_Q;؎bu{N <%Ꮬ#.FotEFNr2I,WС3›}H QD z%z3Bh_InTfD+/$7*3@ICM>/$7*3 >/u D}ѾTf*3@I7 zFo/$7*3@ICMyu$Q,Dr'M7$}Ӿܨ'}N7$FNr29Iu܋jg_Ӷ]߽Cv nM"IPi_InTf>'B/<{y0Yɾp)Zi2Ӿ2I8Q~8 ~(^˾pZf2H:ON 8Q0˖-22X/lȬ$fk6,ަMGYD窕DnnEI1I!}–-H쫸 ?"ô>lY^#<6[8*}Ilx7++K-,n{%JdV,6`{D!A_I|'cz6$@ PXX}pq8hgLifIfT 5ҝQId?{Dar9'h}HL# ɴUe899MݐYIf{8?fE5Jyyy{{{2쩞YZ,Qӕ .г(f(222:;;eܹs6lPWZ$#?_QQu(] ۹YI ל:44f|.8[VfxuYQͶgYO2~ڬ EZڮi][oewDa,~?''pxޖ5;??_JʠVadtkV5766\.o@uMQ6qvCC e%UUU+1+VBM=OUUK!q:R0w=/q+cz6L _c_?~w"*90fg{{bj5 Qq6{:@۲}^nE"pYR%YԮg$Tcw>ta! 
Q_\Ɋ\\oMNY-\ إSб=˜?q-juD,_l{+W_v:vg#QDg}nMӯ{|{б=7 mWt-t@F >=\vR:vg#Qb =5ȡ$ $ $ HD|-TWg#Q_2R@uz6/@KH<-#[B2L l$|b(@F(}b o6=ztݺu)ӊ~+Ŧjkk=LMM wp͛7gdd***C-;Ԥ#we Yʕ+UU98^71_P($B Cl"ꑑ 6 iӦi%%%rW- cnY_F։BVQJfffggLHN5ۺulNү͐zW%{*M)RY7@"' .bRd0MgggE feeevԩYNj%NÍFV^ossX_QS=Tiգ$ )Ek&-_2̥”mp8,f"l~X6Jlp+eee2_|7\bQGGGs` 3Gό<  :vg#Qtd-tttB!j9s s3J(̊:tuu~YLY҈ͫS37n`rؿb֛cz6@GfR2226l0F:YEjjPpS.(f(d%iQb+1$Qz wlaׯ_~``@{zz=*KNkmm299)r74;v쨮V3%kIڶmVyգ6 1Z NGFLfch[VXj^hfeDI*cϺ / 130p+%T^~0DH(!ի_|x( l$E=d&cn˕ۖ5/=ztݺu)ӊ~Ejkk=LMM>^]]:M&䮶 _{j`PсҔhQQѹsCuuuiii[4+erdu \f'ký0\bb8 MMM9rWfM\R%1,mk&HK4QPudddrrRs^+iӦi%%%r7l9R&d<ihhۨ (l1 IP-[Z 311!hQB= [hь u .S_GaEL4(Jhg2r$ Y^?l tOOϚ %J'r74;v쨮V3%I@ڶmVByգ6`.-bp8'@ PXXb6jlD]VV&i~p%>O9fQR+8O=ѱH7bŒ si)ss lys;::BL_?gO}ԟKP_^^޾4wYQlE6 2f휣hkkݕt獘D}K+-- N+++SkaG}"?55uhhz[Z 6ujLa+tX\$QH9 ˰gaxYQ-WZ$A[QQu~خi]ɷұ Q(xyi)7nt:2mhh^Bs8yyy.zsrr^Eḙ)r/GuMQ?6D!;b \2B!ۻwG^# "WbVT;;??_{u0 Y鳽=1;ވI<@JXiij ’݉J?Hm|ל}U:v(.R6m ϟ?4b߽,AzmWYgo<ް;tD 2(tJ2Hm@8e Q`*ږʃkSŧ}Ȅ|{k}h@/uQqbߥ7{qH Z 7N_? u~=+ވI/h)/v=0;)Au1ƴߣJR 7b/@K;}xwO R]FLh)H Z @F@@E4\R@uz6%-TWg#Q DRTGK@$ ^B;г%v>` yz6200PZZt:Ν;KOOOKKknn0ǫSɄU ܹ󻻻~?77r|V^Hʕ+d(QYY߲e !nڴxtZIIU TTTJd۷o/(tpfffggLHUD ȸ?--M YLEVVZX&nZ@;Nm677r Qo@0%%E80 ։"lJfwww/_*@_\_Kx<PHE6v颫KkGuz6Kk)ꒆ 6Av_G:44d`0X\\0QTVVjqE׎ l$ ^ Zʅw~Fgz[ZZ@D]Or~SmmkZMMD޾b ә˧@FzK>ƓDVK0ΉG\<$_>Q;г%c>`-e}Stْ4QIxkϸ^;5B Qb"DE?#w4Ka@D__}Tx˵?v=( y#b-T<7 ԉSZJ) t@ϖ/R޼_(@l$ o)9>v, t@F@KHP]D1߸ l$ $ $ Q QD|-TWg[ƃ?^6]$.4TWgiB$k6f.iW}+г},B*n>އƏ l6]s5$  l(O4'OyС#GH&Q8Qrw```q_(V8Vø%iVKM+Vسgρ$TgΜ!QcX"#NX%(xP!Q&>'|2:uj?8l/i~.K;/Ng(¾cK挌TTTV" [,SYY*Oז/j3`MMMZZYIweQ+/Y G$r+q٭vgddDcc,,eZ֟I(V\)⡇joo?tɓ':QHNo}[2|GUrxH[CqqӧϜ9s7y ?Ͻ^_MX䲟("ʈPvtt<2Q^^_L^nܸQ:rHOOL^:lA+nz뭑DƠ`wѺ ϵ#0.#0Ֆ݉\2QZZj?Qp+=ڇ_[o%2'C?=qܽ/NȘrFq"jp\rwrLݨ>)~;:ΰjfmk+4ke1,IVR;X7,wn*kjj.eDq"P]г( >}d=;83Co_wygL(d+Z: a&ˋpj1%%b;hVYDEa\jGVHIu2=::*2b .г}ЮFod}sCaa\:4,:K]Gْ7Q\."b$Y& -T,ǴخSUl8"1 mqtU4Qi׿eifňHr\ k1z۲8lzW H+]{K|Ak&7:BH@m߾=%%E.Tm9VIm$Qrw}[nijjHH(/_cǎ/DA Q((F&vzNg~~~ww6\.ڪ-h sΝ;gȧ['ryy挌 )UEEE04ܵ~͚52zL]]]zzzZZZssuǫSɄܵ>\zݾ}WVVG%קǏkmmmڄ򕶼[׌^})dVVᐻG ;k_k6;\UE1ka { lVHBDD+̠GN2nA_IHp%=N 'B!oɀzf7ߺukqqVR ~]vݻ׭[oٲEtVi&贒kkzϕj~dvNɕo|0zppP-2;/M7ݤܕbR]-ak6;\UE1kmv*ZvT)QsO]FYDuo09tj^YFcJ=ynZZ:|&l=<O6=66tY@hVfePwO mkzUXFvNɕ2ؘro(W_~@&wVuЯpV]Ŭ]eةj?OO}~ժ RD$ƐD`p~wwwYY /_ )))TC,FiGO-w7-yf͢(|n|% HW&dwWkc_o絰řvv6jSTݨ/BhsL?c^gZ=Dc~΋ެߪ q}[rr>-f}ymX䡔WYٗΞ=DH$ DetS;::N6# ?Sӧ,nvb(Q/m=F=Ga3:G!nᆧ~ZDʨZ}+櫦?G!o3JQ냾ꚽ(!^.žشg jSO#A3g۸SO>Z^-ڣ*~vĉW^yX󛗾KK'Q$ @0JVVVjC(eddh3SSS2_}`}Æ Qt5l=aׯ_^+w.-- YTT$~^6(YԧeY\\czծ(Qm߾/{eZ466|-,kvX^mʬwV~pV]eժU2ҕP!{QQQa/A Y yx⦅}Njc}ڴ sڿb^O?thGAeNzZ=t+.%$ Dlj+DVXt:sssGGd`r22_FD]gӣ'lym dɼ<)aNY_ )իWk2z̊4>>^[[VSS]lsm܆ iQKw1yΟ?/W&sl5W_ƍ<+4"wV~E4<\UEBҗ^Q+ד8{eZ|2D(H$l((f?=/sEcw?KX#e"?(E|' HK5{(8XRfk;g#Qtd.I$ DAH$ :v Q(:hJKKSRRNjLsN+󻻻.vV W"o.k߲o-UYWsssVVCh%^jmmvDfؼyu`ЬE2>O6Gug#Q#.bRPNj+cߑyHLII4( W2 \["QLYlUuM71bCCC $YF[l֭ŲZ[,/Lff6`Dг( .w:tvvv65QDW~mvpU%#{;2?ǣ>66nX`z̓TE((č@ PXX}JG}Dag>i"Q~t# gt;uwwIX|rDD x}X?06t뭷x{p QƦeWL_MXO}_es "$ $N<׿u~= '?ӂgZ ߣ՜H /h)&g7~ ug#Q8 г(R@uq(Z 4.,@K$ DHH qbap~X"}oLm۶Q]D1︆ XYr QDfff__vvL+г(xyDK@܉ڵ\?tTW,$Nl$ $xM'߯#=xXƠиH}Ri)`033sttT?v˜ҔYTTtܹѳt\Ć5p tF7oޜ!kXݷo.eҖonnr8֋x^/]K/ښ#w;fQT{Vߟ+|>ٜappPCCC2 >@"Q/n)7$2o[& PNfB-))&j~5hnZ\\<22"UUUZ' dQuM71b2v$Yƺ6(0:;;eBZF2L^K꠺A@FD300 Evvv6g 5h<zXzzuo++++ddo@_U|ܣaonn;#A@Yn݁dBnܸQ SRRh1Laiu7]=Y0ܖ"5+= [L~6e z"eBk<OGGG(ik$QD]ZL]9 ۲]# )//.[h[[[r^ Q'yyy;wԜ ) 6X$ :b5l CCCjCׯ>jӣm0Kj[(vF5+= [~wBLg/bIł,0ǖ"qrV999X$*)bw=Q^ɒlC(d(FOE-kXT{Zb 9nSOPvjwг(אIR%9Ʉg{{b@F QK/j'z6o).#S|*s֣ɱہ/z߅w8=|eߥ{/bʽ+_&glP]=T8Vk+֟ QlS|{,CЁ7n,n((W/J'_vDH z8}^m~= B}ד(8? 
R@uEt*NJl$p\CR@ug7~ g#QR@u<@F@K|Eh)иH -TW(z6 Q Qx "$ $ $Y+г(fkZ @FX-ѣ֭KVTT$wו,]grر즦&5'z6X'Ndeeܹ34v%#GbpA@ QtdSYYvAL}xA/( Hoڽ5{_39Dcq(XuRBv^B/'@>@JDYlRD'_/H&4Ka5qXiuo`F;Gq {/s ((DwW_\Wu g(إNY]OD`ξlk)9>v, t= +г(xh)bqg#Q >@"QP]h\$Y+H=((@<Ex>^]]:M&䮶hˁc?88 (--MII;wN=TWWlEZ쑞 sss].瓁 ؼysFF"lOgAFMMM9rWfMHVI@L8l߾}k&+"GFF&''eXPJ6mT\\<:D,#DWW 555 aW%CD5ݲeݥ02z%4#NP[[k!B-uV)lNRk +!WDsd%e.ZWsg9n r(8QフN6קM˄(nkk^+Ѵ4m:;;[m4raw-JhGzRA 2l1߯MCzTҗa;kvzzsD\ Dг(bNB0wZ,+VLMFR@ PXX FŐ7V%.++~6 vA-6kRQ`0y&ABNAA#G3#BlH bN"++K}(D~~dTQ dZŌJhKcY!#_bvSWZ卍7nzu$ N Q5QՕbuBjjPD~#p2 6?f]lf%(dEIR 2l tOOϚ %YJ<&r74;v쨮V3%bI@ڶmVByգ6MH8? R@uz6q @KH8Aг(R@uq(Z 4.,@K$ DHH q@ܓgxi2m6^D Ym䘘n  Qpe6l)a^Zڵ\?t+$mƷBs @Rl$ 3h)̷ov].Weeܜp897oȐ***ZC]]]zzzZZ,͑TWWN :wzNg~~~ww6bbB6-kb444}iNX~nnl󵶶j3eA1: "D`ƉdtLH~Sц.Qވ+AHf(JJJ®/V__~马kQH_Gu/o0kke#DjjАoHI={JHf @0DР}SUUeK|#//OO}ˈ?컞jkk]jjj֩i)ň'(2mHb PnnS(rݽ]~ i{CD aI}I&$pLxe}/@=RHcOv=#Bnl$p 0פ= >^皽]g/S=7+f*2/,8Aг(nܸq3 m!U$ P]ae}״_i+׾];G٦2-TWXlj_A<~=@ϖԉ֣i%'%(((ڝkNJ7vP Q QgZ @F=!h)=7P]'z6Z 4.}@K|EXh)Dг(3N Q QX2=nݺiEEErw]ɲu&ǎnjj@DY`Zʉ'vv{zző#Gd1n8ΠcX1k)a'.sAѳ(Ĭddd?^?gtt4==]%{b@F˖RBq:Q9c Q(:2飣9`PbϞ=@# :v>E& R\Gq A;ѳbĉ٭PhrrR&nw=={p@0{6رcEEEik׮5=ǏKhllHqDHflOzmY!=9ӟ+,!_>1S\ jK>+7}o_5?_hu39Dcz$M\Cl)_xک}'@l$ [ʇVnzYO-? 'L3qXiN YIL[oNuշ/Jʼn\cst4.7zWgP:vpWR' k)a,'бDг%i0Goȁ/{HHf;'NPɌDHHH -TWg#QאP]D@KD=-TWp >@"Q, P]AHo/++Ӧeb۶mDDLLLdffiw{{{nDH:X,k׮rݻws Q QD'qb9ҝe @sl)7op\`Pȴ1[XSWWlt, ^~ CCC22ɫ+г(bkȀ[BZ999)m?L8pbae˖"?11!"QD7[,^d@F\Kx^ݾ9&H,@K6 u~q?Ppg#Qp @K3OoWpg#Qm)ܸqfx񧊴}8A0d%Q +}mXjagTNSw>xa>@F@K+6O~wCqzNh)Žڧ'%s=Dž\vRDD;}xwO F]DHHH -TWg#QאP]D@KD=-TWp >@"Q, P]AHC555uX*//K[SS$e0̆$ $Q'>Zo|<5ǘ`#uda F}D8BqCR(N".۲e$L,bOl,3BdSD$%!X'Bi43̟ϧ{{;"oEBRKщ&*z!N%@bd6)(>@1pE # K!a,gʏCis4<2#Ν;799R)(*QA+$+(-Uo3 LM{5zEE6<(j_8\Q(V Exk_,EK"''[sP[앐xӦMկ *S_CW˕ꫯj+jkk2511QYY鞧Bڦ/\#99yϞ=MΖJgs_}'///##C'==]3C###cF-= YUU(G}Uou`]Vgʪ3h`2(nM4ӧO(O5~?Lf/_\1 HZ2F1666J[$r6EN|OO / 36lo#Yɓ'e&--;IK;88#/77rin[ggg* 3F+fAOOO}7o,Y#Wk?xjxFBWw=|+ [BT6w=--zi˖-!* ӣ6e$y@Q*~ˏc555V*o~Q?qMNO($}IOO|xjjNbʖ7L]ڦ̖g"®(FFFrssz^[ =RJJͿ78MPUMϠ(Pn/33sΝVbǎ7tSxG*z-uӢϧ_ 0Ų|x `(B__Cl(e )(VJNh'P\x\Q+/~ ׫ɉ (8=2u &*"bwȆ0SrE#\c<-_h[oȆeV@\"0@@Q(P( .׿^QQ&fGBɋm۷c(0,+"zNMM7b8uTZZw"" +"pٳgÆ ѥaE߷|޽D6<sKp09Յ+E!m2Ȇ抢XΗ&y^%yF___{{{vv, 2骪*۝c^fZ[[dqttH9/^ԶQlٲ%%%E VJ{Nٽ{Zi࠶J!YYY~ӺLdSؘ$f`t٪Ӭ޴v'oBcrJMMM<2#W࡝?^ZBdCQpS0S6ldnvvv׮]׭[g%.++\vrrRt}WR/.Dضm[iiĄlV]]]WW4ɤe{)yΝk׾\ C]6M2-s֭"333Ϝ9_|999yAE~/753]CLP/`4ZR{I V[վoczrձˌ+rѨ(x2P,دGFtw=j,Lk {>/&$V [Nv~RjwrH^)+ 9xSd"XE8sU333#izAφ $ "5$c 0Qns:Rj"lb߾}d6BQSZ򍍍STTTX)))7oϟ?6knn.))$ɓ'ճ eeeeJզuY5ɪL9 XUV=|"k4Mǭ/쫧(BȦ+( @Q,NQ\reӦM.K;v>h^o[[v7,9++KS333iiiNr(dǜiLnnPUlz@2iI z2m}ߊ&VhUNQg_r]]{ZsWduhw(\Qd.Q:=0ׯD`DN{.p\y{""x +"2ihht~c痨sυ#oDQdCQpSsEQ, ---TUUw3n3ⳅϼ,KwP( R45Ng w>gj C( .<h+mbn"><0WESا3d6ċ:K| Ħ` D9ѝ7-5}VH@Q( y(wEg\Q"\>{Ȇ!SsEQ EO\WEx `( :Ȇ )(PD6,+E@dCQP͊"E@Qp ,+l(2<0WED$Lǎ=yecPFFƮ]0u (8=25E?޽{f'OEl& vD6@Ơ󫪪 (+733W^ȁȆ0W5\tIfrr299Y);88 Eq+bœ ˥MHH|7P0!99yrrRfjjJ?Fё9( YEϨNx饗RRRS TA3<<>333;;+3'ߟY@Q@<*<|~544T\\gVGqq---hE|x `(e`oZ;Yl(k%x `(;-dyWO`1P\x\QKgobK ^9)ׯ;s@Q(d+®ՠā{;?t64!9@Q0Wo:=dF`y5-("SsEQQQhc3K$޻(b2(PK(#ox & EfFUC E@Qυ#o( "$s &E( SsEQ EnC2E*uדVMub]݀hsӖ-[P/_P5lXUE" gpȆ抢XXQ^KC 5OMMi9ZҢ >-2C=+3#8EYz /0GFF$52۴XcׯM'%%l]%c8R+]fYn5  EO\Pnt033SPPp뭷Fi<&(T<9 yVVLȆ@Q(E( @Q(P( Px `( (g\Q@dCQpS0וW6?F._+**"CAb( )抢GQLOO,[U @8ԩSiiib!( )抢XE낅;}Ϟ=6lg$_^^w^Of!x `( ENtuu(PHnP(Pwz.+??ppP[ʕydF>/77vgeek%($jQfvܙ&UUU*eQVW;vPۏ{<ijqqŋj߲eKJJPYY955c[7q֦PR>7;;[kАM9HUGkؘj_=chH 6&gS1gP(%lI&&&fgg%_}PZZ:9OYY,jSSS:$3AՙjBJVUHNzA~_933#HvnZ\H6Vwܰqatv_\nn:RfMoݺUeεMՃ>WA: ( bH;==]=,3j^Uc(̙3 QЯ7-\dZ~ffVbܰ2Rdiap86bujwzMW`{U-NXCQ(PA* t_^元^Yb#eQUXm =VvMͲ 
w={VEv#OZU.6))(VPQPW?+z{{ը}r,y6?55EfffOǑ˧U*{lذAܱcjUȢT!_߉$eggk崵K☕%[qSN-ہJ: U,;췠 \y&J'JxCV&gUp E\QQMwg˒~zOXA*D\>{?E@(P0s(hU<抢mE/N 8~Tg^ߑ;Ƕ=~uȏl( E) Xu~6g]wLbSQ0@H(+@Q@l* -bbb tfaą9$D>,7d'b<0W,N ˚?>b#x OE+zד͠o@ƅ`` ( )X^8cﻧǢιP><0W@dCQ g\Q( "PhV (P( E(Y<0WP3dx `( ()+ P( SsEQD6OEmرc'E( '''kc~ 8xOE8y⥗^JIIOl(Ȃ )(VጌYw=Ԏl( N sAEϨWyE jhh="6ZZZ0u (8=25M}l0Y@(uw[*d=(b'<0WbobK ^Yys=GO؈l( R) 5(q ݩ M@N( Œ[5 a~w_2^Q(B8Yr }0F(PK΅#oxE+"8ԐzȆ^2抢X.yȆ抢Xgg/`(  sEQ( O5F|>|~-@8flSsEԠ(b]vG>OE=''n}[n Evtt<,O@!FDӧO(bwww%)p#E"6ΝCQr k;0g$ڌǩ><0ܹsr j9Gf$H8#frr2N/Ssa?>>.WӧOE"[d6LbD3m^<0W l( Ss:BQ\Px ` (E( f (E<0> rرx3Oqq,~JV}L222vEW #oypzzݻgiooOKK;yAQfng @dCQ@<0 R(z{{333_y:1DD6SRRR.]_399EOONE@ 3RBrԷ >;((V,$''ONNLMM(:::RSS~?= vc l8y⥗^JIIO EpFFF{{bໞ~jjjGG=0@( <| Y~Q?~\FKK = @QG|co1COf3O:{fh ~Dv"o=eGK:w=zE@` (SԠH}}ZI!' l( , hiG¼fÊsU eEQ\g%i2ʺ;8W\+ g)c/}zӳ_#*( "[\+ ,x(EA39EAsu^+P( @Q8Y<0W""xx OȆ`+DD6)s(}x `@\(Y<0W@QPD3  tE'!VLNNn޼\>*+e߂Z '//X6OUEQ\\\__?66&]]]*+SNٽ{% ~e 5559s&D6 @(2::Z^^x\.$/^T_566&'''%%][!grJMMM<2#jIrssnwVVV{{iE LزeKJJ&ԔΑb%v嫕UUUmmm;|P477KE` (S$X~ɧgffDBs֭OLLLOOz$U-644NSVV&j)Νkך!55СC2#:δ"F6۶m4Cͪv,**ڴiVŢ(.9dӎ*tE!% e/;X E{*ORbzzXfjB w\zV펣skC# effz._,9ιtRSSSnnBeEazD ~s6| EABɤՃN2iâa>]P' VTTHzjgFk٢ET[[[TTdfUJQLLL(^vp.0> dffȼ|4tcjp0F$crLjdf@F]O(QuqWz*!@l ]Ql߾=({E@dCQ@xHIIQ1lܸQsREbbὥڌlPVV699955UZZ*?ܬIQӊiجdttTO<<{.**nК2 ^umq```ݺuR[^cijرcҼ ]Og.P$mkkӧXKʮ$nw=չ穭ޗPQHq\'CEV4l ,2//OJv }}}GvF?!/ms#3a=T5k GaUpG!~7/Vg/zWޞ"E͘Ͽo7{`|DT?{C(Vg:~}n.z$7w \Q ϐ)B,RzF Y`EDOab鹏WJ)+^ɳuJB쿱;_Q+p.Op*'pwW?^1Wb΅XS 8Em\B J`$=\Qb.(E0^8cﻧ芈"@QPx `D6E + EO\Wl( Ss:BQ\Px ` (L(P(%! ^}``P˝лh+ZLNNn" IDAT޼\>+e_x ~*'<O^^^CC b/b xSccc2?==եl^Q:uJtݻe/Y\ŞP~vvvhh)--̙3CDSE-6Ґ@‚(ڢ"cQ5ĄQE \| E"==]1 !??2}$ 33gffFS1EД^)tKFnĪKҟǏ%^Wo_(osd+s("6]lll,++*--UO)$&&jZNWW74%%E=ưqF?GaUU ( e5"BcZU# 577ɓ'Cϒ&&& U]800n:Y-j=c :v4{Byד虦& n̕0X#sO?ާE!m]]{Z,DKK r$'n|2egg'$$H֦WhIջljjE! s\'CEV4l%YYYe^^=WWQQx@Ikjjc -f<2cyO%Y_tB~~:} E(HѹL dPQ({Ĺ}lev{^u?"jV\JC7޸|r N\l( s~W4 quk &&-!\ȆX3<<;w$O73F@+,lsqw<2CT t>΅X1 Y,ۻd+(S6㳏<2G׊bAZ[ΞŒP%"@Q@$)o^DSQ@Ek׮wܑE_um @d +(:::hEg``@tIwwToڧO[K'! rܹs!)'NKY"P:# 9= t>n$ B@xHB"0<0Wpp#ɿH"&''CR)'"5<0W D6$"DzjH3l@QPx `D6E + ES,*n\a`ȆQ+8#"Up5`rrr^r%%%T_9<<\VV(tww/ժOHHx l1(SNٽ{% ~e 5559s&$=Ñ[}FGG=咔ŋ䤤ֹk6dW\IGfdQm ɮ|~~}sNGnwUU:JYYYcZ~f–-[RRR4Iߧv+i,_hkk3櫠Ye/@Q@ ~d333"!T.uVɪ'&&eiJKK')++ER/jaڵBUVjT':tHfD'ՙMͶm&+fLQ;mڴIbQBti|EOD# G2] fddاj1==]m,3iiij5 \.{EBL\VT'xV펣s_effv^|Yrz{ҥ\#'¦l 9 PBq#Nr\âa> u;)aSMk>tKQ- LEEE6mV%ĄK(YP<%33gffFS%/(((Zz{{F?/6Rw=TSVV_ɢ櫪GY*tE} E`c+OIIIQlܸQ%sREbb።ڌl=155UZZbQB 9 ԢYH(ñ*߰YsssII̟|_w$=\Q:oG^6CVC0@( 0GmLL9ޗ͟@Q|}k|1?o>ba|OV>_=?s.(g.wiV+p!FQpzsA 5r` 1\.p|wOa ES0W:P1x `@@dCQP0@( @Qby`|OȆ}733sժho?D`@dCQpzSW_}5ړ~ s l(ιnw dKWÒ`'''7oz].WRRRyyy__MeeeZ '//)؁Pѡ(V<GQ8~llL槧>'ԩSCv-{@ee_-Z١3g΄Y؁PN n|V1W\IGfdqɜ~d|I^322{<Tr܋/Z[[mjjzΝw*-tR͕M´̈́-[Hi1:IgjR[[6_(R~({S(I E]jhJvtr2Y4^ɧ;v0Tz~qffF$Jn*4fzzZhB#2"}jSSS:$3L¦|f۶mcdjUXL1=EEE6mҪ^'}eZW+ )Ye({(EBB/YGFFyIKKs(޷onB*U_FF*_T6-4="C!WY-tRrkkvgт9t`233U;/_,{ҥKMMM"<^__/k( #] )?"@Q,BQ, Y݂ۿK999sh7/Ԫgj$!fe[hZ`EEdW|[:/_,,-**9UJQLLL(^(Q_5F!?YYYwgffȼ|};FVڏQXI^fLΠQjJN_ɢġW+۷~/S~2tQ{`jjT=hxi9]]]'44%%E=iqF?GaUU * wj(Zpe5]$B1= 577ɓ'Cϒ&&& U]800n:Y-j7=c :v4{Byד虦&n npy @v""H{*E=OmmzEf؆r$mn|2egg'$$H&֦Wr\ɪջljjCEcJՁZI᝝r.+77Wd8 i~VVlVpUTTx<)P5FSɾk֬1W /JO߼ DQ` (={ˀ]]]{ol 0@@dCQ\3}F%nɾ}Ȣ(@sUUs7'n#i$L \Pנ}晹ܹ?Dv{0z ( "Ԡ%dԧ撓( @Q,`bbģSK˜;}C%O_?b192aÜ@T( p|V Y,绞S0W"=l!;rd[(S0W"Žֹgd ` P t>΅ )s(g\Ȇ( aP,+ E<)L?}4^Ja¿I%5S2?cPa;a>􈜰zST'RM2-1GN J6aL( |Q)Z*B0_#,WQ(E2?Q!GmLI|w2p;!79Z_\JOS5B4ӾDVΗ %!6zu/Z}xkjMv4/'=O^ڰ9?'W%R4FJì. 
V ʀv( * 9}ucr/Cl( ĕo DSyYcX9&(^|iGO$2LbXz8fG/Ƃz^SlBoFEufiwb]W p`l jkh:uvZ_gVVzEq(7;8 MK|; }` cBĀD^JY^aHX%1NX0-Q|v wo{fi&=\}$J}$%bBب6ILINI`\%ʈiE2gX5RZ^ǵ-f>-3H+YU%JД6ȧ6_0+"R],AXv`l jkhdԿE({GLƗ%?Kx3XpMo}HĀ8s- BM2E?q~LTF@AQEAr@DQp€ ¿I& Og( L6ɴu_gL:]~3u7G~sΧ~~C '%l&"Ę@E[~;::WإN>}D!G-.= pU<3sǿIcS/3n( @QRSτtIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/images/sequence_overview_watcher_usage.png0000664000175000017500000013262200000000000027342 0ustar00zuulzuul00000000000000PNG  IHDR:pX5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gUEzTXtplantumlxT]o@|GmB)*U(A4@RZ΋9վ)3$Tp} *cӤdF*D)\?a )F܀&c5!=`;4KQ \f2 ðQ lGs:"4BM>4 n)N (~cr fC0H NZZf"PUwȍ)ʹ:ٖN4gg1BXt"~<?ifU#KF ' Oo,}+mG>qIvk>`҇9)ȇtH;E|^sl=ITƂu 1aJcg\ػr9Uv ؠ((z[F8vvXHT'M*9QSק>[~ <,s+` Zh_{4aD~_I7;̱6`>/VDΕ.oSg:O:_A;솭W &\_Dl)>|IDATx\Tn'PKjg`i 1^JbMPBkv뺬M-&ks)K &C `, T(dB]c-Aab ozr2sΙ^gΜ93a5koN7UzIݾIqmwu)UaJYv M3{߬JSlM?|#C?꫒df/nqmrTpvǷ?}gWc+V_u[g~v! M/F%ie5Gev\G~% M_w56_̲yٲ?Ihĵq7_/\DBX{?}gWUI;rĉn n l/<Ԥ[oo/;ˑJ_o÷mƵ,;vP6@\/yRסGٌk,\?sJ[[l6^F<3l۶m׮]---W@\#vm۪|>_gg' fQ[$mݺuǎʉ'د6^]>7_]]e˖g} ke[ R\?^V%7qmB }g蕷Dwzw~JVs$U~LZUUoڄ'/^da7eMKb_dU_PV۶mV@\hov/Vßk]^m=P5&Aĵm+q}VK[&Me5>&&ZgggKKKSSSWY}{3C yO$%T6 z{{O8!!J\{>%mܸ߇ēO>m۶Z&AwwƿݠmIf<̦!2ڵW^QYMfh &:Y,qMB{ċ/(AS#x{Mĵ ~ W^i"?ϻ j6^}#O>C|T~ĵbc#e_qml0q&԰;.ĵ {SN?~/]5-?wLH!RȍF\ q@\#L͸f;Cq@\#k5q @\#k5⚱sI\{q-q}jg￈kZ|K+wzq-Tv|ۛj|Y:P;iGb,!ykoo"s^.( y7ًB=isM/@\vU_uyey(?r$̆^{M$[5m~~o'ĵ8rHp_ֲٳgO:we~{ާ$LkqǷOמ*@\ЅSg^L~o9ZV;~Y}MO|]2S%6%xǵS uvv34Rcru޿<4ewy!/,Os;*16qfmq]]]SSӁ$uww$We8zTjȐo'/YR~{Wo w\{n۶V[KKKgg'qq;E}˺Kִ&:ձGu%ۥc\6qmweIluuu>ĉn`fŵwrnGo/g5SGv6׸]0oמ" S\~מ}ٚ6v702К>Q $65@\;c;ёZF\kc)RoC+, $6bcy^X_Λ}#ĵ{x5uɚu۞+{|cqmժ@{4>FyꩇeЊk'?[*}ĚZ :TۙZ7(9N\۵)Ä$omҥw22qOvΫRS9ӤBZKY|Z\я]p̗BJVr]\_X7##?p|>{=w݅Sg.}ĚJ]vT<{徟k]?!=:߽]ӯEeիsssp'?yBR,)ӝ//_-&Yןi)_WŸz?-OJ{e32nkkefo5f#쏐ld&,Ozz&5ĕ% ._{j؛#M"a\KI/WzNa.*$EF~R[׿Pf+)#LݛB\Rq'V|k2~;j1 [nzS{w/9WѸ׮is$i?E5f$-]#./Wqq7DRRoW[o=._KyV-x˿_5ܸ&xc7(ڇqM3%Q'&9qbpMl/ÖߛUqMVy%ĵ5g,Ƥ(ߛ5łV6g?_]pVS~bEvиԴ12/?(?o0bE\:q?.`o6?(=_,H&;Z~\<Ւ9/OuLhs5535l̗n@\#31EDDk#s@50ZbbbUU… C0׈kLkUh;t͛7GGG\ymܸQ@2Q^^zN ^}JYF&$$?~/3Ķm$n;;;ٳ~=z4--MUH7Ss"##z[ ת"K[$_5ZqMN||VA***JMfm}W{{V<<}vƵ'OjE]tr-#szzz j״ѻז={$&&-k5`ZCC?=ݹsv:ZtbY2|hS_׫:::H LOO/s.ؖL 5q-;;JIk4''y_tkk5`JƵs΅kYG /Ӌ/x3t<⚦K{n۶m[i4Jv v1@\V"γӂ4D#uvvLmmmmpSWWW3-HC9(iZww7 }#OL9kZZZ|>_Ӹy4Q!i4/}kQ0HdsĉqSLDHsQҴ~v1@\#=$@\3Cq[  q 5.zHҸM!q kk i q ݻ T577G ׆ i11 Uk׮ cZƵŋBɢEkq|x%rLq !-[Hb|'No ĵ{=-r58O|)"6[)L;s&Pk>lMMMSSS[[q }w߽[ڲg466x㍎!7|oɊk!rq 5㸶rJcY싊<d5{ާ?i)eyY% jh-۹sgbb5\#KKS﨨(9w\OO[neí{w}2Q__o[*$m%°eJtyyyttt ګqb׀)o`5a27E\;{n:}GEk˳oLyW}:وu9z?l|&oʴD1ڒJn55Y@ яAZ5a322N j),Z17CմDmVmrȫW4͸& --o% +qˍ)0[i+s9"rFן׽"rZ+"YD?ܹ 2GrZ |گÇe[oU= yx!Le@%L8m V,n+ѯhV7kO?SO=%Aŋ5`*5;ksΝ;cv[rIR $&&jhI\d`TzU=,))23h\l5Ma؊ŵ4Ĭn5 ɟoSP;׮;$tOOޢ@Rݻջ'L*ptN\Y >)3`>!:ujF6E+05СC~ f\P D UUTMy/Hvva\sz5xj`.ꒅ_z饻K=z;lŊ~ѣGGN䯽LJTT~[L vN\Y7bK,LIIQWkQV y/.{;;;{=kMaъ2،kZCO<~GyD~,0~$w Ќk?N̄vOv7;|Wb1#[np\.^7t̗Suqqߙ+ͬ?;v`,iR{Jc33C;HLLUy̧~Zf~醆vm^BS+**V)ZXfqM5D6!ژE݈ktk---> xVV9n1R\?xiis=,0~$w ;uvv8q x'a゚ KkO>$6)Y`7>&я~|r]s5K,cu2 kQ56ry@\k*,@\k5`BڢEVDI@\#5 4q ܼOx9;fffs=wؖ'Nڰq1WWWm۶-l?%QrdC׀iVΎAF9&Ȕ㳳ShmmmͯLC9&Ȕ㳻ShrFzj_b(Ǥr|w ׀M΅|]/BK 嘔#S~NZpjq-A@ Fgk@\@\h\H q 5}6*~lvmGe !/NQK.͙3ɓ!S7Lvq#'i&BP;[n=&m1k]I#x[.]tQ-V\\,եb/^{1 RӧO=-gwACCC\\-Ӣ~0<8iVʬ@+V5D&cpۉkf5 ^aGWWl~a{zzT!ZWvڌ Yf`` ///??_͗e+lut&DuWZ1-\.ja[nʴYA;D& O0k,tR9IKKĶZL_> gN9 ̙dn`)&ROMu ,((kDkJ]K#8A v 6;,uxXV?=6H,'*;C^8::pk0 d-//֒rnspÇ;sɴg[)5~3tyUy |p9ޖhغgb&؏kR uTLP}c<ǭϝf6|wgn\\H:0h\x˵zjo!h\3+P细\R;v6q[)+0{ilSN\cr~kyi$RHF  hj"DxԙOxO65p!-q 㡠ܹs7 
C~XnU%nw7!Z6k\?T^^xnGņNyevΗ}<=$@\#@hj|})\jm{o  4#Imw-CĵPk?oo"ĵfZՋT>q-wwۢF! $Z/o6e2e`iw!]s-b8 B΅SgUi6@\!ZH6uktF@\@\3zH5  M4.zHk)v6 bY B=Mx4Ve׈k! i {b-$=0#&8U}kĵqm=dllƍ͛7qmq 5or7oެfvttdeenәzپ9sj+ǣFDDH!9992ٓ,IIIf@{xҥ\)S^P;#gOS+//z ꫲbbbaBBÇŊ2Tŋ~tsi&U7Lkkkff} hc5U)!L'9站Z|>իl-3WZ%i@[Q|AXvmFFFOO,'==}֭2QYY4fffk׆;dVOYlҥ]]]R (*bF_ &)Gb%<%&G e>lyzeeeO6[@N2q1YX!QQQj fϞX, IQȓ'OZǵsj£kfŴQCө-4F_ k+glb1-ox i,fV\VAښ-؆5󥤤nPjkkeB~/_\;i YoBtpY=^1z٬ư6 ۬Ŵ-ox i,fV\ƾ}ڙ[YxLo.YXɐ %k,<lS]  zoE- n?5J}hU}k5{ۯ{3j^TH:c/3@\j=6V;/]jPڙVVs@\#=$ q qmx!k @\ә iBt+l̜qCxB-Lz 6fQqqcrRIk#xuUk೪Cy|||AAAWWlq qJllƍ͛7qm49i55188x5kxb͚5~Zǵ*p8_1NM||jkf=XOOa:3= DFF/riǣ$g 7kz,YIXdzUC/::ȑ#9yb<\q@eeeVV-[… KKK%IvoHFFF` 5k'S 6wttc̮-kZ} ]PPHC-[fVOjU5))aKKKrr̴ᝡ֬Y4kA/>][?SDssߒl$sDkkkBBz;CNĠ;CEII֝ikjjפNToՐrzVOYYY\\sLC}W-,2q!&m߮[8;o/0qm2;GK5ikkqmjuF~=w ^QsjGҩ{Nr\kqm tFpZ/gwAjgj@\kSVEjBG=L+mfigen5k/KՎ?EtgGZ{|Cn#Z(HۻԳ;q-uO~i Mvՠ©3T3 ĵm@ Fgk@\@\h\H q 5k p!-Cĵm@ 茀)o޽`&innk5`*wfkC50ŋWc[hq .59t'{Ykq q !x_@\@\Cŵ캺Hb  !Zffmjkk%tvvWBq{ٲe$:w ڤBZ`J5d,v6 ķijjjkk MnSF&=-MVTTZ!cF}qm}׈kqfz\Cn155fqyJR?˗GGG\nA[RzNsܹeeevVY%F}ӟ̏͢fp e駟g}V_yͪsNY{dl=jDEEI5ek}Qq i\\icXqmݺuoLHpQe3I:{lmmLK`'|+o{UV]~ eX ~zo|CKJJdZJ`2]\\,Ywt;qMmSCdb XPPv7_9n\``-87Ce뮻Nv\qMpi LÆ0\~z#GdZœo߮"̼k԰L %4GJ[C P21׮=O=DۺWL9/^$54sd .?ތ O-=aII&}YEEEeeecc#~\`F|L.q\cr15q `ƵC<lƵEt'{YkO|@\0fמ}Y@\05477x@噙sO6) 5`|uuu۶mۂNkA?q @iiiijjsd^}-{# Mn5|LgewwwsĵIy)Z9]s{_dr50eș;4Vc5Ⱦ=ϑ׈k@ Fg\\Vk @\qM i 4nSzH6U;hļJ(W~߾}QQQAj6m qmǵѴ"))yzoq:6.`s5F\ R\\vT~q\v :tPZZ{Hjj< 5džٶģ>ΐqyMV\3[e ѣG###7m40dرc>E\ |t^rN355ٳyB&$x^Y>!!U[fճg/++ de #""\.WNNN___G?*5!s ٞ={/k4*]t)77W^NNQQߛa:u+V"b~Υ9Pfj%K*fQYMdhI0;̎ 52w\Ifa^]~K[EM¯(`(âZ ;{* 4;8U7 qmqm>oppp``@:eiެ+޹GV ?e)za YvmFF̗UVsӧd,%r.WIOOߺuLTVVkR!2xҵ;)((jᎮ~y9\vYf53g} _X2;6ݻ%9bUI2}k&vZWbѨGK6W1l̓Ӭz-V6ڸ'7:hWw.;N5-'ݓ'OvjooW.\P^2QRRҥKe_ b###5M,v옖?͛WQQ!;&&įʬ,R-[X4M]7$###ɳf2ܞuojݠ׮5܍wQ9n޼Yr4J&vgvM5JYf59[NrX86d 6N+2;Z W?";{* 4;8mVϰêT!B+544H^oyyͯ5_R7J4 +ûF)!::Z_SSf>|t<+sLNNU9'%%Y4MFyyyRf ktF 5ĵBZ75=$@\qm=cV-:M^k6?*lkņ6!ft\3;#2ʯnVqqcRE4[<ĵaצG\ݸqy5M#a{Hĵav92UUjB3:zN3!!5䎎,-˨OB7<{<˕}{/++ԾP}kNN\9sSH92GbŊYCdB_Ṽ!..N^"::Z}a߮ӧOϝ;W^ѯ9W={Z7b[M~%z2SC}ٳG}aWRR bVQa''_+//Wӂ ^}U111>h͈k!ZJE rSr.Q,??ygppPΠyc̾NG}%-̿ڵk322夨F?.KI1 V_mo8fd4s?bZ=Ԏ.eL6#kk! 
K&/^g2j8_|ۭއ XQb[LO+*--V&˭ÄEj5;;[NޱWj޽{ZmfC̶5k^XXhzÍk#:$;ܹszI~KR)M\W__b6K3vK(Kۧ_&4}蚜 ++&$!!aݺuzfkvblttMoCmATC6x]h3Y-܌ɸ yUTTȴ)));͚5kXq-""Blٲefq-33S:`x5aÆt^coڴIg`eK ڵ J@hK*oI\dǎp˖-Q]&e]f!foZ64pSWVVfee͔ڪmbͪ4~\>ZFĵuay9#pJ9ئ444HҼ^oyyY\+**Ri.,h)M&Mj\x1??5dʕZ᭭ bO˦ |3[P3%j!fo.M'5-ixghrrrssLI6IIIĬXY5ۏkG(7# ŋ)Fbeeeرт)t*,떜&[ aL~EIl=RT}mBTh^Hr֓zzރrs`*:YP=w춿ya҇۸ nS@b謔U}~ J\~Gy鿧M5L__Sk5E_k5c茶'8>=$@\ -\H `f/C5@HPVsm~ikwԙ#E?qM@\k\H q-q:C5:#  =$&=$@\6 c+Tk6@\/n[~&Y̱~:>6pH 8X Mg7n7oTkjbppk֬x<Bh.ݳgOrrL$%%566E+Krss]."3qMa+1!hnSOOOߺuLTVVkW"vx5Ixx8+1!&3jooSĵIl[{n_Q}Ur\Umw/Ͼ0cq@\ UapZ#I\}knӦ>*VʄifzMZh-y~缬9_Azs&ڱ 5ĵ}uݟ{Fk/BZmM Zv'[ Z;ͺ5@\ ij.:N50o5Cĵm@ Fgk@\@\hr!mXdn}uSĵuB# Œ}N*..v{L9\8qmƵ؍7Λ7fn\tRnnxz<$6Ck>6kkS?jm~q%mq-q!-[#Imw-Cĵz(~~~ڷ7Aq-T{ L]zg5E?c[=$@\#@Hd9_~meާk5|ڝ4zH 箹 C5p̑ߟ8ͦk @\4\H q-q:C5:#  =$&=$@\qmR-V\\x\.Wnn~3STaaaDDX玎,-/ze⤐hgЦ ڄE6mRJ6NkkkٯLl6mi#kjC)c̊phVu 7cdittt/[c#M])몭F΂in0q:4<=4;;L6AWp8kRRRn2ْ*bcclmqmd2*=DZ˗/_lnA;zh-3V+~$Lkfk!(á9' 龾>5kim}۷oVMYIaQvn6f!6lŋ˄._,h\.&?/~ ;j]{uQwK: 5w?Z_U:'k﵈kqjvzonΨzQ#Ԏ5Fw{Hm8vΗ_ԠZ3y3@\;[=$_׈k=d_>TQ{I*B\}֒lF\!~ƞ%PHlqmp!653l%E]?{_]@\V ΰ)kĵĭ@ BqkktF@ 5ĵƅ@ ufq Œ}N*..v{L9\8qmƵ؍7Λ7y\ҥK.GaaaDD<ק&0=%6mzN3!!բ@{INNƠЬ!j¢梡!..N֍޼yYYYn[zYb!&mW̔T+qDk׮7LKI{(yEJ,..NLL.Ozz֭[e2---h\S "׮Y|Μ9*J3k |>E-%p10%zHĵ"##;::ԴLh#**]M_pa#k/^TӒrNuz,&ku'OksՊ׼^oYYӧ-'U 7\N׆r8#k*Yo͚5~-zE[[[%Z644.RRRn:5ĵatFѵv蚺<: ZicTbG IRoLF4퓗6\^]__/uS54+6h X\Hzꬬ!ZذaCzzJrǎ.՚5kVWWaQ~O55R%[l JbTwjHFFF`A*q-""py]߶l2b!Bŋ/_t:%iWp8kjj̒e8R`aɚf.q zފ yyyZCaQseݸ8Pe~LLzrb[`b\8uS/^|MצǏK`N7_nəo5VPPwܹ f6Nl%M~#E&qmJ*//x\n?)O ڙL34sm:ĵ!վ'J](1˺#e|@\Ψs?ZȮ 3n" qFk`H/7;"3]@\@\Nv~LMÕ ٩.5k @\kH q-q:C5:#  =$צ'=$@\q 5`f9rStkii#e0֭[Ǜ 'bǵ{キK.%ĵ eqM =@\#5Dz\{G~kk3gMF\kVPPpCےxo5@\Cŵ;w666Jbkmm å M6X;v$&&:FZ3κiqܶm$kBǵ>,))t\.Ǔ3!b3.S}db›b TJ\{W:tޛq @&KO}jǎ#^{~ĵ >~cĝw9'5SJJJdƍ-w>Ϻ\x)GV̎A hӵIIIRwȬ䪪*#qz_=--o233%Èr5NgBBL[RC=]&̒V]y y}s555i0S Lqn]]]fG?p~q666ʴ䒐OdXM)$ǑZ^innVډkvQ,OO>)ӣ& όp˜sa56>"3E 5> f S!¬+ ,UPP`wQS6t~gSzzzFx9,?OeD}4ill}Q,r'|Rx≰]ٲe,v}gfffjW0Xo=PWWׇ~hqZf]{~駞zJ䮰 %ׄoyr$?_XXc]wp8\.DyoVD2z͞Ψٺ퓰ܬ|wq,/ErY%Cn㥑+ 1+CCƵh4Y_|饗dV Bl{ 5"׮4fw\[ti0G\k5 t\q S--->裏Q3//,l_@\k5 4;wnlTvמuׂqC566QԄ#:y+P^]]]gΜy#{1~P^{ a__G@WW)#{1~P^{ a cd__?݋By+P^W^0=$@\h|C5:#  =$צ'=$@\q cUQ,kDE\FHuu햿>&uXbEoo/q 0'҆= ,زe+i@8??`zHĵLڵkEEE.TUUcMeee\\!***bcccbbjjj R#dB^LMMmkkW\ v,[J|@\HRT^^@ 9FŚu Cl߾]&볳kJ6$3SFHZ@2<<\]]6h"/%vIM$/q qm:NmZ&TILLP'666d\e-;2-v5mw+ʄQ 6)tZ5;J!111 @\@\,3E>2Y&hJ,Z?_%'fj\.~?33v[o5ĵ)eq"~tC?344$E94ǣ ; ]׬SlWSSVYF$sĵHWQQ?0@Ŕ 6hIٳ螞׫Om۶]5sׂRONJsk 2+W$@\tNSrLUU-IlIIIc ̍7\i&##%htmZR3ueWFZJ5ϗ,zkkkkfsI>c\xqm6(//tG}]Qi  h Oxg1"'Ym;w_ҏ͞~;zH~6N@\D{|op`]p_)kƉ@ "=$@\3C q qmq"-C5]5ϸ u9zhbb~@\Lkzqmz7*gOOOoii=Mh!UWWn;]O#uXbEoolkc+6\.4ik ,زe+i@8?? qM0#j@\2iSSSp89qqq.p``@-YQQ#ksKKKGȄUez^ӹhѢǏoݺ599Y:u*̪$immmk׮J5,byÇgdd\YPPD-Zp[b+W@)vK!YYY᷐F3lpϗ"3d[aG;uLeԣ 7&&@\HcL]*<׭[?44TRRRVV_vTdp,//#*sŊ===Û6mCz/fV%Y]2_NKKSG;:jc|۷D}}}vvo2A-f-a5I3~_J H_xi47ow%|XNƳ9fkjbV =zTҭ*vժU(隧@\&u711Ce/_ۃ֍W3eBLttYdy5:_^ZŦ.' 
,s7ܰe‡Ena!v> Bbbbvͬ^oMMMww"*gsB5WY NOOߵkL;wNZO]3q kAw'fHls1;U YlY&hJ-cV7-Textcp AeHY`盐5`g]4 ."q*kn\KLLԟF$cE̪d٦Y Tl]CY j&V簢U2lpѣ "S1l폮Ul(.55uIIIګ532R 6tvvٳgNÕu@nnL kfU2[^Ur̖W'ƶmZ&::Ǭnᶘ͸ [reXq-uC6E˄H$ZYkܵWY e@\-udÒp8/^$M -++sXj:fbY̖j8N9"]gX򚌌 ݧkZfƍ&Wl5ϗ,[zkkkÊk,\&.\(릤CZĬAŎ5`uh6=bVaÅEccy(`6ǵ}w!٩o>fm\myqWvEuQ`ٵk<']/xMN`q\^ĉ4?Jwߘ_}={v5c_ E]Qij8m#]Qk)cO}d5s_"3noGMR&.fi4q E鍯>?qW=|ޜaG`t q-:Gyh7Cb@\}"lӆFv'K Zx_{q-]|ʅT@\q 3R=$@\ \qkktF@ 5ĵ6'=z4111*j, 2+,ơ1!""LLM"!MecwĵYr9"*;5Rqq͞yǣͩs\2_kjj:G,[AݝVZXͩzN355M-lXl*1116gpp4zL]UfmmVE?~|֭ZN:b-i׮]+**eGTUUÓa9+n[+++׬Y N2ϗ"KJJB0;c+ӫWu c#<AG*pժU oy괴.2CкvZ)2_mfyy,'wU+VlڴIbnFFFXYVK2V1[^OҤ'ueywb6i\" 477˄C}D~ 6.S?{Uw;;;e"11CyʕXulck.8w.Q d\SC5rw:ڴYluLTOtWUb-iRUJŦ.oF+&&ƢYEsnn q-R:F>j4;yFiXuTp>٬x"b-Ggff14YB6W[[[AA |>: =z{ θJLLԟ<55uIIIckfuXl5~hZ-i1fi!Gdݦ! ktm ͥd]: -^Ws6lؐ}0zYuo>m6ç `VNҒ?IK500?kZ-j`*d9fũV\1lnsI ,(qM*@gZ'JVs\rX >IIICœfvill>5qFy:;ǰc9fH tehYYkĪU9vmIFII锈ta9+>/99YVzeF7 a(ft 6;۷vĵHk.wy"z'N)q %q"-Cĵ6yO/Oo͟9砣G&&&Sژ!_׈kĵ&r曉۞9MTňkq6%aE;?4Zol26\qF\Cva&;6 ,NwNSI"-y^֦ͿvZQQSx̾F_TVVbO$͛7/d s,*`}0\Q;f}ZlCeKIIIII[n5{^5Q[[U`ѢEǏUvg ٕfkfm}膲5|WyosǙ͔-ˬp_l788XZZ=B&Keu3Ow[Ƣ@סq-"HCCC_NKOۑB& ͕֭u̒2R-#ӫWu\ EeB%n̖N5VX#شiw322&J$oֆbͪj6_&y0q{lf>zRf?:úO?fQqm-&&Fus?T/j t{{awrJll&q2[TEeB NQfȶtww[?~FU 讪8[ήWI&7Y*fÆ5|v4.}ٯO~]n\;°n!n\(u63Lۭ}Ġ>Oz#_\\lfh憕FNQ˴l'lrlOmp׃aCM+f+zonW{ahZvZX;4@Nk3444$7dhstMQw{W&:+d kj͚Uf ';E\F!>>~B8[ 5YS9L +֬f}po -d}~_atmúٻbtZD86..NYrʐݢ:Hv6lюgϞUgŋ+dɒM6I- f+FGGةٶ)l>ym,vv,-SPPÊkÆ5* g ]fb_M;°n!c7 4^.5k2uϗ,zڐk׮8Ny- ~<))IVCC/0% j l޼Y7[qƍUF ZŢY22pBiڌkl;rppX->c l&G +֬fGW3/pv~}cYYkĪUd[ |G-d}ffyĵHkMΝ C v \tI̪ C ax*m2zv\qM=ua W zrߗi92-s*iVgKZ0.2oҐ[mbiyƻ[+rɓv^fsj^;? z|:ѵ^z_ x'N;a$^oYF?kv2QܵOn 4\V8N;ho]X`leZo|Gݍ7]Vlk[e$>===W~|:D^| LGqKX@~3mXELv\Ɗ(aPmE||ZpgbE C53&/nm OfC_AsܼZ0|jRO!¬+ ÊkA7ߔUii~cc<裏?MMMwqHLL|>69?m޼Y;]j'[otwuJcoaأ>*nj|)Yq ܹs0sl*_;kE}wbȯ6k5Qo}cc3^f";y{Pމ]]]Hq 0p̙~;2 ?';y{Pމ}}}H k\j!q"2 0YS<=005!GOsx'Ay'^z @\ M?.5zH"ktF@\@\3zH5  M5NzH6c!**2ej*6[fs\ԧ&@\q-hWviiIVXK\F\= ,زe+i@8??q͔ʼn*=tvvJpN3++l4Hݕ:+˧e***bcccbbjjjB.PBRYYr  2ܬ(zOM F kssʕ+R[ pSC MUzXh@STT2IZU>zkJ׮]r67[.77WK=KJJ +''g2Q__Uϰ&\yyyrwlc,T6@ `ǵk5k!TWWn;-樏IVXK\#@\= ,زe+i@8??F\>!***bcccbbjjjZM F zN355x4S2Bov 7\ƬLϗ"uHJJںuaG)**R[**++d~aaZƢ (hr^j333npûfeH4Y` Y~K4oᶀ!stFCCCj`]ӏGMT'&&655Im: %v*lVrQ5rT zfêYm%Bi;K lY]q qm:zɣm۶$lyN===|& sd+WfVfQQh$2ʖПaÆN>{:l~X7%K6m$Ml}jY͵}kkk?{"վ˧7/$lv' EcU~_}ws/ƾĵiǜ]qj6V?6/מ|δeik׹ E~/*~K]Mؑ2 E\gt^_PKl>F#ĵH쌂>x ѹ_7r߻6m+.S5\j@\q 98!ZD2uktF@\@\3zH5 (.4$MN !ga17W`&n5'OfggGdee߳'fj[l?OuVsYkGeEͩp8k׮\.IUUU!ʸ8yp`` ȐfC111Y͵2!w^LMMmkk|gRRĩѵ @=iM6]Jgdֳ ۧ3??v˺YYYfmoք Aeh!wyZ^͗9Wr$}GZu˓rd~֭*)))++3,?HNNe>;;[_v$ )MҡKWի5Y!q-pz8N5Nr~bbbGG6}}@ї'Z'dZJCԴEU2xjJw:ڴ뭩6kp F-)VBM@S֍nۺ:yĉ%>~YYW~Tm cVr}G\8\c!BO1S}Zd5kUYYi*鳑mmm,X`8nzR#sYvZznRv3کvvvvccLbg]r}G\#kQgGV]YìkrQ578wltNYoBУRfSS֣YT@2PPG?j*{LSN-c6fXr}G\#lk===!AEEE~~7lؐ%ga5륜YYY۶mLr֟dVst@ U՟jfiR7-!?..nt-PӒ?n* IL]uevzsl&Wy\2\-ҟjN_xq]]?z[ cVr}G\#lk7nt\!?,..v:rPlvMJJr8rlnhhk---A3%^k$4T5bժUrԷ4… )))[`SNIV򗿬5YEM౤Dl7!}d~rrzkkkEn[&O?QffsKVI2l1+9#E.5!ϝ;'Naz0`R \tI|@\LZvl>Br\4k5R0+*mzoQg!ƥ=$q "q kq 0]ܾ K\kn㼯H')kq#\j{ȫ}[o_Ձ^ng'5my7fvx]Mq TiڈZM{F VO5Dty鍯>?qW=h)j3¾#@\"\jvzȋGyh7CbkH۴a}6kHt;6߻}4\pkqK  E4!q kkSi aTDE]F&@\!,ǚj-%BE}LbŊɎk6K&/@\`-[̟?6k&ʼn׮]+**r\J*++щaC2QWWzNgjjj[[Eɣ>|8##C&ӛUTTԘ=6188XZZ=B&nȺ+WHmwvvJsRHVV~-d>/%%ELJJںuVo '1kS2u>yyyrTXn]nnnPIIIYYco괴4뒃l߾]&볳kJv%hJ͞Z(// ͔u +-ZK !RI 7oA%El'1kwvvj2@bbbGG +ckj4K2.YOd-m]d{{PBBxjujax%= k5PBYАJix<`[k5;qz3eCZ2GJ.i fi+**F(aÆ-ɝ={V8cXTCfŬd^}deem۶I@џfڙy@@.77WZ\\:lʕa5j-tJ\r ¬B'1kapptJ8ҟh%9 ))p,^AqFeAH-%+---A3%kӒ$+C-Z5bժU&0|d [[[V\^pvJJ04h+ q99_W.\<i 6 \tIq!t>=7h 6z<]TTf_#Ilr5W.&q-Bq"-9=5ܘ.m# w`=$@\\pS6Ҷq~ 
EJ\ƍ7n/h{k0ͺ_nGCk=5F\HjoHoJ{W4ۡCĵ‰iq   ߕ ߯n 8!ZD2uktF@\@\3zH5  M5NzHڴydvv{DVV폊%ߛ,11tFGGY< rOuVsYlb$Hy 6k׮|>B<qMҦMsTi```ѢE*fTiiiuuKSK .]bccR Z7ܒ0=$Deڵk׭[4S-k*y g bik"9qj_=iqᦥdGˈkq-:G\Ko* kq-B;#5ئ {5v'ڴ8wa6߻`R63\|ʅT@\q 5DZk q q!6զDڨyl.`R6[gbͤ>5q L aA?TWWn;-*cR+VNv\Y2yZĵ lٲe״@ P\\O\6 ]VTTr<OUU> TVVCuNd:Զ6G;|pFFL7771{jmbpp4zLݐu3LEW\ v,[}>_JJuVí0,piOkDZ>yyyrTXn]nnnPIIIYYco괴4뒃l߾]&볳kJv%hJ͞Z(// ͔u +-ZK !RI 7oA%El'1kS2NmZ&THLLPa%66v qMfIp:%+looJHHPON-,Ǻn0T _뭩 iOc!"32 :Qp!Uޚ5ksd4;.RߟvȰ𶶶V ,|K/?d{ ĵG:::k* a5!1*xpGT9Akvfʆ455Im:eD+=*4\f!q Ψ"??`DAA6lђٳgՉS===E=dZJV'JVVֶmۮ|kfOdrss箍?ũV\V\B5)p+Z,d{ ĵDb)ᠪJ䀤$ñxↆmƍ].a z"ddd͔MKȢ xjٺ2׈UVI›e+^ommmXqMz…)))Р0+.nO`nr~?q-;wNWCϿ+*mot#qmf+//tB~$msۯ]MڌT[[xnwQQ zj#l5ܔqOW3ش[1"ꤸqƍֱĵHk-G}I]k5sn<.s~;zHF\駿2T C5D{ CĵHĉ+._Uq 5k DZk q q!68!:ΘǏQy鼼hY7==1CQؙp8nŋ{zzZ6dee^w?wڵ}-[N\;w:YK|ȰL;󇇇O:f1qm&q:iG(5pEה 6HY f˗ѡ:2-\cbbƳ M1H{ҥ5k֤իW;qti](lkk E L}ժU˗/=Cѵ~)4+C%,Y&>>>+CGվ;ސA?^PPvd^hMMMJJsL>}:??_[wɒ%A߻fؾwM\Ԋ }ƶ48{0[zn= `R]>|._ν"B߱~=d<{:ɾĵi}jޘeڠZMKj6}~-!O{S/ȄvL3\@\+] Tŵ=dv5cGmq-"/Sx䝣y!_"3Rm0/7;"3x55ĵ]>SpEv*4DZk @\qMi E4.SzHFg55:#@\TDZ3ů/#L'<ѯ9k0I6+o-߽̒xd׬K\k0DYs95ˇ[&&5^gΜ n "ZMMM\\63 5`f5Ƶ_~2_ccoG-Me!SayIiLwu8W-&YvȴcIkS}Uek>/y5s4-''S6UP {co&M"a\~͆?{[.Mjs?mPs:=|vv˴6G;}{+-nn!097jq!]Ss$nQ8q \k?ycJd"==vĻᆰ%yT[,-UWDp㚄_c@L nO ;9634-9ϟ];qMDݲ`A??G} 0Th~\U^yokqmb\,mﭙAo2'7ɣ<31=Cyy_N\n\^U,\GkOE,YTBm /5pᇥ!ڡC[?wwr;vlTHw;'Zޓ}cln`ŵZ_jW{d))s8n4Mv~N<ŋoזșDvL9@\ $]z.?{9kxOW1v!P3>]?9f<*N W^e_ T5 }@#ߒF\knh>F:˟{[^akkHlk56Iw+f -Vz -v*q M;x|fޓߒ/طoغ`@\#ĵr;8z݇,);vhll|7` q&˘G^~;w}}}W5@\c;wm#qM`]]]W5@\c2tKwMo? `qfM6>w;r?Zoݻw}6+o5;#j3b۽e+q @D/z[nGjo,owyϧk?}$f.9wW<+Ym׮]| 2ʿq^dCdsw'9>Jlq7ݑ{v辕 JOoh\m$ 8~aPQA]t]tE](EAHYDEًYeD\9ine+tӤ䶆։dG1 9OΖgٳ<|iM]4ZP3PddM BHNabs-q:mÚy 8=7aǍӗzjBE%f\lOe2fݝ88884O=,g䲏=<2oi^IjBȵ4>E>Jڊ{2[x'yPQQQ|ѫGrmL۷/"ܳA=RYeeylٲbcCk\bf #"RիbWȵEks>,kjjJNrm4nk⌯,;ůor4STاkat smbK҃y 6Flooo&dO?cy`;]݈5  \k\; wrM5 \kr @5 \k:c~Zr keıj Ky\k2+ݾCr\$WVmXwq|ȵˢ5g.=~ܶɱ~΂T֭[\;|QW}Ʉ_WǯelvǒuUo^]+ Zߏ%lK ۛd.՗Oj\R+ ԡLO͵wFQjݩT^}Izmu2)6% G?zZtsޗm^?=6@SgG}{CG j缝m?hl?һb_rmmqXuS?PtZb!=^;%vw="6@Ks\2q [-ɲO?is=VO kW#Z2Vjy'S{bڨsk:i m{#ƍ^]Vb2ӥ=`6ؾʫn&ȵ9wGi;>20x䒵W6ޛ憻;~h3 JcoGc}7ifjCXK8T[{&>ύ' r;:cLXKk8_k&g箆i/@ )XFYZbן_k\+qeNAiU:ȵZ2Plv\+6J$k2X w%\k@IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/images/sequence_trigger_audit_in_decision_engine.png0000664000175000017500000021720100000000000031311 0ustar00zuulzuul00000000000000PNG  IHDRZ] 5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gUzTXtplantumlxUQo0~G8*5uRu*-[ڽLd#Xsvh [$;}b,6ONmf~s1RHa jrS1+ }pu.X͓5IuLIJ|k6eф }p08E32=p;? 
[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/images/watcher_db_schema_diagram.png0000664000175000017500000022012700000000000026007 0ustar00zuulzuul00000000000000
[binary PNG image data omitted: Watcher database schema diagram, generated by PlantUML]
LN%DYmIf9L]š۾߰2Tmwx]t3Y?87ov֝ uW"V-D(exaKC0ԁ2TɌ/ & PԔ:cܣ+jl5Ս¸m w5{͏=@CCYȗ;:K()ِ:u]juV29TnܸFޝVI۹mKYm j ݤvbہmoXɶ~ NnGmw\1ڹƝ***1+W4 4tc:P2o/Ay/7ĉ,/vv6:c*Z[_ߵ+?=}Vf>zON_ꗌWE)WՂ촼|s@Cu zXeeun}Pmm?~4ԾL8F*tpc/19yb[[m3DV1ٍCTC^]4醣Gw7fQZdGDTH=yyYrI2˒3% eeommӮ׭[zf&iFh'OK^)) Md0))A]eY]3nև`rxrzX77drhsqxGͺ௫8_aqljlzY1bk7_Y*ajےOSi3Wf:*_OdJd CǏj VMocc.7ȷkrL޵+?11^,Lt$[""|޻ٳu}=eJ ʲmKw /^3'tQo(BVa-/)nwhE/7낿bx7r1vW`{ev7FzsWwJIrsR֧2J7?m0LgDL9lmgWэC__嫼PYZ?^blRQژhyIނQRm~ ldS #StkϾm:jƍJÌ[o8#Pu':xpΞ}>Kjds! vCɍw͚LRX 3szBxc{Sr^l5߹nRIKrN΃]]9f'D4%VdP!)mժ% :q&F:t` 0Q&$O q\ܸ߯bbh7Լٳo~lBMM|_驥Ms-8._%%RS_mvi4&v3%8cMWwCejwTg8#P:^ףy({gdA5Mo!nΝ0P0nڕ>+3s=.n܉U7Ю 1_h/W:i.:ƾgbxswry0܆ !Ga`#P׭[z{bbpGq=ORKK|eg7nʺEw]w}ʃJ y/iSI$OrsRʶ m/95UYL)-IIlE#q&F:w'ۮe>9 ;;dRLNlUީQ6! ޫ#/6͙NjƓRtcP1CҘa&zmv aH7 C% 0T C%C%b#>>Ó>C`*z^<…i>LKB5ާ_UU2 uҤԬL1`juDǏ*V+#Bݸ99zri}h|F8j}F4Of1ɋRm~ g){IJٳo6O 6GM8azƍJƬ7~ 1+WޔSd\{ԟɡ7H-/)a1Tk¸5O=PWZ"#R˲ :t$rGR5LVsc2:dh0*…iN5(`PuГYdze{{Cj3nҳpzd R&gmgUMՆ+Ϩil$e64ԚR꜐q[1z 㥇HȂБzcI I@Cգ.S CjE_?>!.ҺNgq۶MuiZL”)֕:εCwnk1gC~ ĨyVVnٿ9}_Gr!w$]Cz.|P*;`e涶PL{fLT8rѢ;N#GdäI7l PUն%ss)uUV_Os3fGCwqم 11--R,//U~ӧ@6Cկ~IB_Gr!w$]ûV&%%Uƻ c*j^noot"(ir)~׮|ȥ) +Օ;7^HVKK%"b/=_7;~T>Kߊ(O!6߶ hob̊gHwJY?b˟Бzc/Yr>[a_X;gN wJa5ԑee$+d591`j!6)a+_ tcP1T 3< CPKv'`ꀧZ)02yǎR;v'Ón *)0--_|qc+'1T'0HM3e49VP^oT~E*A9Q@: P0T q>S DL6ފ!Jc(/,D67&z,7PYš*kꚝNZP'2<*Obx*:J "l I`(5I *ÓP0T  fڏ%C:$0T 8d߂2yǎR;v'Ón *)0--؄\99Ԏ2<R <7zQ+|[[}z,'rڴ`}tvsK6c'LZˇ,',rۖ|bah_\ܸ͛sC&Ajgx2<1T @c23WWoƗ:ë FGKKERRBO'Xmݤ@K]Ke]CP0T &&nӽ_Wd55'd+%ܶSz_eCP0T 5/8ꯀ7ĉ*uNWl ljڛ:e-m8.dxb*}1;;>+~eDEqܡˈ11--s+*67KNoXm$H O' bbLx"YA%7$%%TVn1}۾ZPR]/WvG(/,*:g̔]ފٙXm$H O' bLZC)EЍ1T C%R;ÓP0T.)2P bA`006~8f$0T C8pZb&j O1 b@dxb*!?dި:էx"MKjjkJ6W\>=Q/_UUܗ{yR9s#P O CP[l_W_RJ " I`(ʟVu2UI* eD|_}p7Ha~I`7珤Lx"thoo$!%))r)CؾZPR]WvGފa]fӧ(].y>wJ1<*: uDY٦9sRHW0}ÓP0T uDUPtcP1T 3< CR A`CP1T 3< CR%mE2$u7 5HD1hJú =W*jK Watcher API Microversion History glossary ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/doc/source/install/0000775000175000017500000000000000000000000020370 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/common_configure.rst0000664000175000017500000000503300000000000024454 0ustar00zuulzuul000000000000002. Edit the ``/etc/watcher/watcher.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8 * In the ``[DEFAULT]`` section, configure the transport url for RabbitMQ message broker. .. code-block:: ini [DEFAULT] ... control_exchange = watcher transport_url = rabbit://openstack:RABBIT_PASS@controller Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ. * In the ``[keystone_authtoken]`` section, configure Identity service access. .. code-block:: ini [keystone_authtoken] ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = watcher password = WATCHER_PASS Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service. * Watcher interacts with other OpenStack projects via project clients, in order to instantiate these clients, Watcher requests new session from Identity service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/common_prerequisites.rst0000664000175000017500000001252200000000000025400 0ustar00zuulzuul00000000000000
Prerequisites
-------------

Before you install and configure the Infrastructure Optimization service,
you must create a database, service credentials, and API endpoints.

1. To create the database, complete these steps:

   * Use the database access client to connect to the database server as the
     ``root`` user:

     .. code-block:: console

        # mysql

   * Create the ``watcher`` database:

     .. code-block:: console

        CREATE DATABASE watcher CHARACTER SET utf8;

   * Grant proper access to the ``watcher`` database:

     .. code-block:: console

        GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \
          IDENTIFIED BY 'WATCHER_DBPASS';
        GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \
          IDENTIFIED BY 'WATCHER_DBPASS';

     Replace ``WATCHER_DBPASS`` with a suitable password.

   * Exit the database access client:

     .. code-block:: console

        exit;

2. Source the ``admin`` credentials to gain access to admin-only CLI
   commands:

   .. code-block:: console

      $ . admin-openrc

3. To create the service credentials, complete these steps:

   * Create the ``watcher`` user:

     .. code-block:: console

        $ openstack user create --domain default --password-prompt watcher
        User Password:
        Repeat User Password:
        +---------------------+----------------------------------+
        | Field               | Value                            |
        +---------------------+----------------------------------+
        | domain_id           | default                          |
        | enabled             | True                             |
        | id                  | b18ee38e06034b748141beda8fc8bfad |
        | name                | watcher                          |
        | options             | {}                               |
        | password_expires_at | None                             |
        +---------------------+----------------------------------+

   * Add the ``admin`` role to the ``watcher`` user:

     .. code-block:: console

        $ openstack role add --project service --user watcher admin

     .. note::

        This command produces no output.

   * Create the watcher service entities:

     .. code-block:: console

        $ openstack service create --name watcher --description "Infrastructure Optimization" infra-optim
        +-------------+----------------------------------+
        | Field       | Value                            |
        +-------------+----------------------------------+
        | description | Infrastructure Optimization      |
        | enabled     | True                             |
        | id          | d854f6fff0a64f77bda8003c8dedfada |
        | name        | watcher                          |
        | type        | infra-optim                      |
        +-------------+----------------------------------+

4. Create the Infrastructure Optimization service API endpoints:

   .. code-block:: console

      $ openstack endpoint create --region RegionOne \
        infra-optim public http://controller:9322
      +-------------+----------------------------------+
      | Field       | Value                            |
      +-------------+----------------------------------+
      | description | Infrastructure Optimization      |
      | enabled     | True                             |
      | id          | d854f6fff0a64f77bda8003c8dedfada |
      | name        | watcher                          |
      | type        | infra-optim                      |
      +-------------+----------------------------------+

      $ openstack endpoint create --region RegionOne \
        infra-optim internal http://controller:9322
      +--------------+----------------------------------+
      | Field        | Value                            |
      +--------------+----------------------------------+
      | enabled      | True                             |
      | id           | 225aef8465ef4df48a341aaaf2b0a390 |
      | interface    | internal                         |
      | region       | RegionOne                        |
      | region_id    | RegionOne                        |
      | service_id   | d854f6fff0a64f77bda8003c8dedfada |
      | service_name | watcher                          |
      | service_type | infra-optim                      |
      | url          | http://controller:9322           |
      +--------------+----------------------------------+

      $ openstack endpoint create --region RegionOne \
        infra-optim admin http://controller:9322
      +--------------+----------------------------------+
      | Field        | Value                            |
      +--------------+----------------------------------+
      | enabled      | True                             |
      | id           | 375eb5057fb546edbdf3ee4866179672 |
      | interface    | admin                            |
      | region       | RegionOne                        |
      | region_id    | RegionOne                        |
      | service_id   | d854f6fff0a64f77bda8003c8dedfada |
      | service_name | watcher                          |
      | service_type | infra-optim                      |
      | url          | http://controller:9322           |
      +--------------+----------------------------------+
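
As a quick sanity check (an illustrative extra step, not part of the
original procedure), you can list the endpoints that were just registered
and confirm that all three interfaces point at port 9322:

.. code-block:: console

   $ openstack endpoint list --service infra-optim

The command filters the service catalog by service type, so an empty result
usually means the ``service create`` step above was skipped or failed.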
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/get_started.rst0000664000175000017500000000203700000000000023431 0ustar00zuulzuul00000000000000
============================================
Infrastructure Optimization service overview
============================================

The Infrastructure Optimization service provides a flexible and scalable
optimization service for multi-tenant OpenStack-based clouds.

The Infrastructure Optimization service consists of the following
components:

``watcher`` command-line client
  A CLI to communicate with ``watcher-api`` to optimize the cloud.

``watcher-api`` service
  An OpenStack-native REST API that accepts and responds to end-user calls
  by processing them and forwarding them to the appropriate underlying
  watcher services via AMQP.

``watcher-decision-engine`` service
  It runs audits and returns an action plan to achieve the optimization
  goal specified by the end-user in the audit.

``watcher-applier`` service
  It executes the action plan built by watcher-decision-engine. It
  interacts with other OpenStack components, such as nova, to execute the
  given action plan.

``watcher-dashboard``
  Watcher UI implemented as a plugin for the OpenStack Dashboard.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/index.rst0000664000175000017500000000241700000000000022235 0ustar00zuulzuul00000000000000
=============
Install Guide
=============

.. toctree::
   :maxdepth: 2

   get_started.rst
   install.rst
   verify.rst
   next-steps.rst

The Infrastructure Optimization service (Watcher) provides a flexible and
scalable resource optimization service for multi-tenant OpenStack-based
clouds. Watcher provides a complete optimization loop, including everything
from a metrics receiver, complex event processor and profiler, optimization
processor and an action plan applier. This provides a robust framework to
realize a wide range of cloud optimization goals, including the reduction
of data center operating costs, increased system performance via
intelligent virtual machine migration, increased energy efficiency and
more!

Watcher also supports a pluggable architecture by which custom optimization
algorithms, data metrics and data profilers can be developed and inserted
into the Watcher framework.

Check the documentation for watcher optimization strategies at
`Strategies `_. Check the watcher glossary at `Glossary `_.

This chapter assumes a working setup of OpenStack following the
`OpenStack Installation Tutorial `_.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/install-rdo.rst0000664000175000017500000000203000000000000023350 0ustar00zuulzuul00000000000000
.. _install-rdo:

Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Infrastructure
Optimization service for Red Hat Enterprise Linux 7 and CentOS 7.

.. include:: common_prerequisites.rst

Install and configure components
--------------------------------

1. Install the packages:

   .. code-block:: console

      # yum install openstack-watcher-api openstack-watcher-applier \
        openstack-watcher-decision-engine

.. include:: common_configure.rst

Finalize installation
---------------------

Start the Infrastructure Optimization services and configure them to start
when the system boots:

.. code-block:: console

   # systemctl enable openstack-watcher-api.service \
     openstack-watcher-decision-engine.service \
     openstack-watcher-applier.service
   # systemctl start openstack-watcher-api.service \
     openstack-watcher-decision-engine.service \
     openstack-watcher-applier.service

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/install-ubuntu.rst0000664000175000017500000000162600000000000024115 0ustar00zuulzuul00000000000000
.. _install-ubuntu:

Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Infrastructure
Optimization service for Ubuntu 16.04 (LTS).

.. include:: common_prerequisites.rst

Install and configure components
--------------------------------

1. Install the packages:

   .. code-block:: console

      # apt install watcher-api watcher-decision-engine \
        watcher-applier
      # apt install python-watcherclient

.. include:: common_configure.rst

Finalize installation
---------------------

Start the Infrastructure Optimization services and configure them to start
when the system boots:

.. code-block:: console

   # systemctl enable watcher-api.service \
     watcher-decision-engine.service \
     watcher-applier.service
   # systemctl start watcher-api.service \
     watcher-decision-engine.service \
     watcher-applier.service
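
After starting the units, it can be worth confirming that all three
services actually stayed up before moving on to the verification chapter.
The following is a generic check, shown here with the Ubuntu unit names (on
RHEL/CentOS the units carry the ``openstack-`` prefix):

.. code-block:: console

   # systemctl is-active watcher-api.service \
     watcher-decision-engine.service watcher-applier.service
   # journalctl -u watcher-decision-engine.service -n 20

``systemctl is-active`` prints one state per unit, and ``journalctl -n``
shows the last few log lines of a unit if one of them failed to start.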
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/install.rst0000664000175000017500000000102000000000000022563 0ustar00zuulzuul00000000000000
.. _install:

Install and configure
~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Infrastructure
Optimization service, code-named watcher, on the controller node.

This section assumes that you already have a working OpenStack environment
with at least the following components installed: Identity Service, Compute
Service, Telemetry data collection service.

Note that installation and configuration vary by distribution.

.. toctree::
   :maxdepth: 2

   install-rdo.rst
   install-ubuntu.rst

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/next-steps.rst0000664000175000017500000000026200000000000023234 0ustar00zuulzuul00000000000000
.. _next-steps:

Next steps
~~~~~~~~~~

Your OpenStack environment now includes the watcher service.

To add additional services, see https://docs.openstack.org/queens/install/.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/install/verify.rst0000664000175000017500000001752300000000000022426 0ustar00zuulzuul00000000000000
.. _verify:

Verify operation
~~~~~~~~~~~~~~~~

Verify operation of the Infrastructure Optimization service.

.. note::

   Perform these commands on the controller node.

1. Source the ``admin`` project credentials to gain access to admin-only
   CLI commands:

   .. code-block:: console

      $ . admin-openrc

2. List service components to verify successful launch and registration of
   each process:

   .. code-block:: console

      $ openstack optimize service list
      +----+-------------------------+------------+--------+
      | ID | Name                    | Host       | Status |
      +----+-------------------------+------------+--------+
      | 1  | watcher-decision-engine | controller | ACTIVE |
      | 2  | watcher-applier         | controller | ACTIVE |
      +----+-------------------------+------------+--------+

3. List goals and strategies:

   .. code-block:: console

      $ openstack optimize goal list
      +--------------------------------------+----------------------+----------------------+
      | UUID                                 | Name                 | Display name         |
      +--------------------------------------+----------------------+----------------------+
      | a8cd6d1a-008b-4ff0-8dbc-b30493fcc5b9 | dummy                | Dummy goal           |
      | 03953f2f-02d0-42b5-9a12-7ba500a54395 | workload_balancing   | Workload Balancing   |
      | de0f8714-984b-4d6b-add1-9cad8120fbce | server_consolidation | Server Consolidation |
      | f056bc80-c6d1-40dc-b002-938ccade9385 | thermal_optimization | Thermal Optimization |
      | e7062856-892e-4f0f-b84d-b828464b3fd0 | airflow_optimization | Airflow Optimization |
      | 1f038da9-b36c-449f-9f04-c225bf3eb478 | unclassified         | Unclassified         |
      +--------------------------------------+----------------------+----------------------+

      $ openstack optimize strategy list
      +--------------------------------------+---------------------------+---------------------------------------------+----------------------+
      | UUID                                 | Name                      | Display name                                | Goal                 |
      +--------------------------------------+---------------------------+---------------------------------------------+----------------------+
      | 98ae84c8-7c9b-4cbd-8d9c-4bd7c6b106eb | dummy                     | Dummy strategy                              | dummy                |
      | 02a170b6-c72e-479d-95c0-8a4fdd4cc1ef | dummy_with_scorer         | Dummy Strategy using sample Scoring Engines | dummy                |
      | 8bf591b8-57e5-4a9e-8c7d-c37bda735a45 | outlet_temperature        | Outlet temperature based strategy           | thermal_optimization |
      | 8a0810fb-9d9a-47b9-ab25-e442878abc54 | vm_workload_consolidation | VM Workload Consolidation Strategy          | server_consolidation |
      | 1718859c-3eb5-45cb-9220-9cb79fe42fa5 | basic                     | Basic offline consolidation                 | server_consolidation |
      | b5e7f5f1-4824-42c7-bb52-cf50724f67bf | workload_stabilization    | Workload stabilization                      | workload_balancing   |
      | f853d71e-9286-4df3-9d3e-8eaf0f598e07 | workload_balance          | Workload Balance Migration Strategy         | workload_balancing   |
      | 58bdfa89-95b5-4630-adf6-fd3af5ff1f75 | uniform_airflow           | Uniform airflow migration strategy          | airflow_optimization |
      | 66fde55d-a612-4be9-8cb0-ea63472b420b | dummy_with_resize         | Dummy strategy with resize                  | dummy                |
      +--------------------------------------+---------------------------+---------------------------------------------+----------------------+

4. Run an action plan by creating an audit with the dummy goal:

   .. code-block:: console

      $ openstack optimize audit create --goal dummy
      +--------------+--------------------------------------+
      | Field        | Value                                |
      +--------------+--------------------------------------+
      | UUID         | e94d4826-ad4e-44df-ad93-dff489fde457 |
      | Created At   | 2017-05-23T11:46:58.763394+00:00     |
      | Updated At   | None                                 |
      | Deleted At   | None                                 |
      | State        | PENDING                              |
      | Audit Type   | ONESHOT                              |
      | Parameters   | {}                                   |
      | Interval     | None                                 |
      | Goal         | dummy                                |
      | Strategy     | auto                                 |
      | Audit Scope  | []                                   |
      | Auto Trigger | False                                |
      +--------------+--------------------------------------+

      $ openstack optimize audit list
      +--------------------------------------+------------+-----------+-------+----------+--------------+
      | UUID                                 | Audit Type | State     | Goal  | Strategy | Auto Trigger |
      +--------------------------------------+------------+-----------+-------+----------+--------------+
      | e94d4826-ad4e-44df-ad93-dff489fde457 | ONESHOT    | SUCCEEDED | dummy | auto     | False        |
      +--------------------------------------+------------+-----------+-------+----------+--------------+

      $ openstack optimize actionplan list
      +--------------------------------------+--------------------------------------+-------------+------------+-----------------+
      | UUID                                 | Audit                                | State       | Updated At | Global efficacy |
      +--------------------------------------+--------------------------------------+-------------+------------+-----------------+
      | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | RECOMMENDED | None       | None            |
      +--------------------------------------+--------------------------------------+-------------+------------+-----------------+

      $ openstack optimize actionplan start ba9ce6b3-969c-4b8e-bb61-ae24e8630f81
      +---------------------+--------------------------------------+
      | Field               | Value                                |
      +---------------------+--------------------------------------+
      | UUID                | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 |
      | Created At          | 2017-05-23T11:46:58+00:00            |
      | Updated At          | 2017-05-23T11:53:12+00:00            |
      | Deleted At          | None                                 |
      | Audit               | e94d4826-ad4e-44df-ad93-dff489fde457 |
      | Strategy            | dummy                                |
      | State               | ONGOING                              |
      | Efficacy indicators | []                                   |
      | Global efficacy     | {}                                   |
      +---------------------+--------------------------------------+

      $ openstack optimize actionplan list
      +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+
      | UUID                                 | Audit                                | State     | Updated At                | Global efficacy |
      +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+
      | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | SUCCEEDED | 2017-05-23T11:53:16+00:00 | None            |
      +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+
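
To dig into what the plan actually did, the individual actions that make up
an action plan can be listed as well. This is an illustrative extra step;
the ``--action-plan`` filter is an assumption about the same CLI plugin,
and the UUID below comes from the walkthrough above:

.. code-block:: console

   $ openstack optimize action list --action-plan ba9ce6b3-969c-4b8e-bb61-ae24e8630f81

For the dummy strategy the listed actions are expected to be no-op style
actions (``nop``/``sleep``), which is what makes this goal safe for
verifying a fresh installation.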
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/doc/source/man/0000775000175000017500000000000000000000000017475 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/footer.rst0000664000175000017500000000016200000000000021524 0ustar00zuulzuul00000000000000
BUGS
====

* Watcher bugs are tracked in Launchpad at `OpenStack Watcher `__

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/general-options.rst0000664000175000017500000000453400000000000023341 0ustar00zuulzuul00000000000000
**-h, --help**
  Show the help message and exit

**--version**
  Print the version number and exit

**-v, --verbose**
  Print more verbose output

**--noverbose**
  Disable verbose output

**-d, --debug**
  Print debugging output (set logging level to DEBUG instead of default
  WARNING level)

**--nodebug**
  Disable debugging output

**--use-syslog**
  Use syslog for logging

**--nouse-syslog**
  Disable the use of syslog for logging

**--syslog-log-facility SYSLOG_LOG_FACILITY**
  syslog facility to receive log lines

**--config-dir DIR**
  Path to a config directory to pull \*.conf files from. This file set is
  sorted, to provide a predictable parse order if individual options are
  over-ridden. The set is parsed after the file(s) specified via previous
  --config-file arguments, hence over-ridden options in the directory take
  precedence. This means that configuration from files in a specified
  config-dir will always take precedence over configuration from files
  specified by --config-file, regardless of argument order.

**--config-file PATH**
  Path to a config file to use. Multiple config files can be specified by
  using this flag multiple times, for example, ``--config-file <file1>
  --config-file <file2>``. Values in latter files take precedence.

**--log-config-append PATH**

**--log-config PATH**
  The name of the logging configuration file. It does not disable existing
  loggers, but just appends the specified logging configuration to any
  other existing logging options. Please see the Python logging module
  documentation for details on logging configuration files. The log-config
  name for this option is deprecated.

**--log-format FORMAT**
  A logging.Formatter log message format string which may use any of the
  available logging.LogRecord attributes. Default: None

**--log-date-format DATE_FORMAT**
  Format string for %(asctime)s in log records. Default: None

**--log-file PATH, --logfile PATH**
  (Optional) Name of log file to output to. If not set, logging will go to
  stdout.

**--log-dir LOG_DIR, --logdir LOG_DIR**
  (Optional) The directory to keep log files in (will be prepended to
  --log-file)
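
Putting a few of these together, a typical manual invocation during
debugging might look like the following sketch. The log path is
illustrative, not a default mandated by the services:

.. code-block:: console

   # watcher-api --config-file /etc/watcher/watcher.conf \
     --log-file /var/log/watcher/watcher-api.log --debug

Here ``--config-file`` points at the shared configuration, ``--log-file``
redirects output away from stdout, and ``--debug`` lowers the logging level
to DEBUG as described above.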
toctree:: :glob: :maxdepth: 1 watcher-api watcher-applier watcher-db-manage watcher-decision-engine watcher-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/watcher-api.rst0000664000175000017500000000107100000000000022432 0ustar00zuulzuul00000000000000=========== watcher-api =========== --------------------------- Service for the Watcher API --------------------------- :Author: openstack@lists.launchpad.net :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== watcher-api [options] DESCRIPTION =========== watcher-api is a server daemon that serves the Watcher API OPTIONS ======= **General options** .. include:: general-options.rst FILES ===== **/etc/watcher/watcher.conf** Default configuration file for Watcher API .. include:: footer.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/watcher-applier.rst0000664000175000017500000000112200000000000023312 0ustar00zuulzuul00000000000000=============== watcher-applier =============== ------------------------------- Service for the Watcher Applier ------------------------------- :Author: openstack@lists.launchpad.net :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== watcher-applier [options] DESCRIPTION =========== :ref:`Watcher Applier ` OPTIONS ======= **General options** .. include:: general-options.rst FILES ===== **/etc/watcher/watcher.conf** Default configuration file for Watcher Applier .. include:: footer.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/watcher-db-manage.rst0000664000175000017500000001467500000000000023512 0ustar00zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher-db-manage: ================= watcher-db-manage ================= The :command:`watcher-db-manage` utility is used to create the database schema tables that the watcher services will use for storage. It can also be used to upgrade (or downgrade) existing database tables when migrating between different versions of watcher. The `Alembic library `_ is used to perform the database migrations. Options ======= This is a partial list of the most useful options. To see the full list, run the following:: watcher-db-manage --help .. program:: watcher-db-manage .. option:: -h, --help Show help message and exit. .. option:: --config-dir

Path to a config directory with configuration files. .. option:: --config-file Path to a configuration file to use. .. option:: -d, --debug Print debugging output. .. option:: -v, --verbose Print more verbose output. .. option:: --version Show the program's version number and exit. .. option:: upgrade, downgrade, stamp, revision, version, create_schema, purge The :ref:`command ` to run. Usage ===== Options for the various :ref:`commands ` for :command:`watcher-db-manage` are listed when the :option:`-h` or :option:`--help` option is used after the command. For example:: watcher-db-manage create_schema --help Information about the database is read from the watcher configuration file used by the API server and conductor services. This file must be specified with the :option:`--config-file` option:: watcher-db-manage --config-file /path/to/watcher.conf create_schema The configuration file defines the database backend to use with the *connection* database option:: [database] connection=mysql://root@localhost/watcher If no configuration file is specified with the :option:`--config-file` option, :command:`watcher-db-manage` assumes an SQLite database. .. _db-manage_cmds: Command Options =============== :command:`watcher-db-manage` is given a command that tells the utility what actions to perform. These commands can take arguments. Several commands are available: .. _create_schema: create_schema ------------- .. program:: create_schema .. option:: -h, --help Show help for create_schema and exit. This command will create database tables based on the most current version. It assumes that there are no existing tables. An example of creating database tables with the most recent version:: watcher-db-manage --config-file=/etc/watcher/watcher.conf create_schema downgrade --------- .. program:: downgrade .. option:: -h, --help Show help for downgrade and exit. .. option:: --revision The revision number you want to downgrade to. This command will revert existing database tables to a previous version. The version can be specified with the :option:`--revision` option. An example of downgrading to table versions at revision 2581ebaf0cb2:: watcher-db-manage --config-file=/etc/watcher/watcher.conf downgrade --revision 2581ebaf0cb2 revision -------- .. program:: revision .. option:: -h, --help Show help for revision and exit. .. option:: -m , --message The message to use with the revision file. .. option:: --autogenerate Compares table metadata in the application with the status of the database and generates migrations based on this comparison. This command will create a new revision file. You can use the :option:`--message` option to comment the revision. This is really only useful for watcher developers making changes that require database changes. This revision file is used during database migration and will specify the changes that need to be made to the database tables. Further discussion is beyond the scope of this document. stamp ----- .. program:: stamp .. option:: -h, --help Show help for stamp and exit. .. option:: --revision The revision number. This command will 'stamp' the revision table with the version specified with the :option:`--revision` option. It will not run any migrations. upgrade ------- .. program:: upgrade .. option:: -h, --help Show help for upgrade and exit. .. option:: --revision The revision number to upgrade to. This command will upgrade existing database tables to the most recent version, or to the version specified with the :option:`--revision` option. 
If there are no existing tables, then new tables are created, beginning with the oldest known version, and successively upgraded using all of the database migration files, until they are at the specified version. Note that this behavior is different from the :ref:`create_schema` command that creates the tables based on the most recent version. An example of upgrading to the most recent table versions:: watcher-db-manage --config-file=/etc/watcher/watcher.conf upgrade .. note:: This command is the default if no command is given to :command:`watcher-db-manage`. .. warning:: The upgrade command is not compatible with SQLite databases since it uses ALTER TABLE commands to upgrade the database tables. SQLite supports only a limited subset of ALTER TABLE. version ------- .. program:: version .. option:: -h, --help Show help for version and exit. This command will output the current database version. purge ----- .. program:: purge .. option:: -h, --help Show help for purge and exit. .. option:: -d, --age-in-days The number of days (starting from today) beyond which soft-deleted objects are considered expired and should hence be erased. By default, all soft-deleted objects are considered expired. This can be useful as removing a significant number of objects may cause performance issues. .. option:: -n, --max-number The maximum number of database objects we expect to be deleted. If exceeded, this will prevent any deletion. .. option:: -t, --goal Either the UUID or name of the goal to purge. .. option:: -e, --exclude-orphans This is a flag to indicate when we want to exclude orphan objects from deletion. .. option:: --dry-run This is a flag to indicate when we want to perform a dry run. This will show the objects that would be deleted instead of actually deleting them. This command will purge the current database by removing both its soft-deleted and orphan objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/watcher-decision-engine.rst0000664000175000017500000000124200000000000024721 0ustar00zuulzuul00000000000000======================= watcher-decision-engine ======================= --------------------------------------- Service for the Watcher Decision Engine --------------------------------------- :Author: openstack@lists.launchpad.net :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== watcher-decision-engine [options] DESCRIPTION =========== :ref:`Watcher Decision Engine ` OPTIONS ======= **General options** .. include:: general-options.rst FILES ===== **/etc/watcher/watcher.conf** Default configuration file for the Watcher Decision Engine .. include:: footer.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/man/watcher-status.rst0000664000175000017500000000402400000000000023205 0ustar00zuulzuul00000000000000============== watcher-status ============== ----------------------------------------- CLI interface for Watcher status commands ----------------------------------------- Synopsis ======== :: watcher-status [] Description =========== :program:`watcher-status` is a tool that provides routines for checking the status of a Watcher deployment. 
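For example (an illustrative sketch; the actual check names and details vary by release), a deployment that passes all checks prints a results table and exits with return code 0::

    $ watcher-status upgrade check
    +-----------------------------------------------------+
    | Upgrade Check Results                                |
    +-----------------------------------------------------+
    | Check: Minimum Nova API version                      |
    | Result: Success                                      |
    | Details: None                                        |
    +-----------------------------------------------------+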
Options ======= The standard pattern for executing a :program:`watcher-status` command is:: watcher-status [] Run without arguments to see a list of available command categories:: watcher-status Categories are: * ``upgrade`` Detailed descriptions are given below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: watcher-status upgrade These sections describe the available categories and arguments for :program:`watcher-status`. Upgrade ~~~~~~~ .. _watcher-status-checks: ``watcher-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. For example, missing or changed configuration options, incompatible object states, or other conditions that could lead to failures while upgrading. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **2.0.0 (Stein)** * Sample check to be filled in with checks as they are added in Stein. **3.0.0 (Train)** * A check was added to enforce the minimum required version of the nova API used. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/doc/source/strategies/0000775000175000017500000000000000000000000021074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/actuation.rst0000664000175000017500000000413500000000000023620 0ustar00zuulzuul00000000000000============= Actuator ============= Synopsis -------- **display name**: ``Actuator`` **goal**: ``unclassified`` .. watcher-term:: watcher.decision_engine.strategy.strategies.actuation.Actuator Requirements ------------ Metrics ******* None Cluster data model ****************** None Actions ******* Default Watcher's actions. Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ===================== ============================= parameter type default Value description ==================== ====== ===================== ============================= ``actions`` array None Actions to be executed. ==================== ====== ===================== ============================= The elements of the actions array are: ==================== ====== ===================== ============================= parameter type default Value description ==================== ====== ===================== ============================= ``action_type`` string None Action name defined in setup.cfg (mandatory). ``resource_id`` string None Resource id of the action. ``input_parameters`` object None Input parameters of the action (mandatory). ==================== ====== ===================== ============================= Efficacy Indicator ------------------ None Algorithm --------- This strategy creates an action plan from a predefined set of actions. How to use it ? --------------- .. 
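note::

   The command below is a sketch: each element of the ``actions`` array
   follows the parameter tables above, naming a registered
   ``action_type`` (here ``migrate``), the ``resource_id`` it acts on,
   and its ``input_parameters``. The audit template name ``at1``, the
   instance UUID and the source node ``s01`` are placeholders to
   replace with values from your own cloud.

.. 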
code-block:: shell $ openstack optimize audittemplate create \ at1 unclassified --strategy actuator $ openstack optimize audit create -a at1 \ -p actions='[{"action_type": "migrate", "resource_id": "56a40802-6fde-4b59-957c-c84baec7eaed", "input_parameters": {"migration_type": "live", "source_node": "s01"}}]' External Links -------------- None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/basic-server-consolidation.rst0000664000175000017500000000614300000000000027062 0ustar00zuulzuul00000000000000================================== Basic Offline Server Consolidation ================================== Synopsis -------- **display name**: ``Basic offline consolidation`` **goal**: ``server_consolidation`` .. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation.BasicConsolidation Requirements ------------ Metrics ******* The *basic* strategy requires the following metrics: ============================ ============ ======= =========================== metric service name plugins comment ============================ ============ ======= =========================== ``compute.node.cpu.percent`` ceilometer_ none need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the nova.conf. ``cpu`` ceilometer_ none ============================ ============ ======= =========================== .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ====================== ====== ============= =================================== parameter type default Value description ====================== ====== ============= =================================== ``migration_attempts`` Number 0 Maximum number of combinations to be tried by the strategy while searching for potential candidates. To remove the limit, set it to 0 ``period`` Number 7200 The time interval in seconds for getting statistic aggregation from metric data source ====================== ====== ============= =================================== Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 server_consolidation --strategy basic $ openstack optimize audit create -a at1 -p migration_attempts=4 External Links -------------- None. 
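Before creating the audit, you may want to confirm the parameter spec (``migration_attempts``, ``period``) that this strategy accepts; a quick sketch:

.. code-block:: shell

   $ openstack optimize strategy show basic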
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/host_maintenance.rst0000664000175000017500000000414600000000000025152 0ustar00zuulzuul00000000000000=========================== Host Maintenance Strategy =========================== Synopsis -------- **display name**: ``Host Maintenance Strategy`` **goal**: ``cluster_maintaining`` .. watcher-term:: watcher.decision_engine.strategy.strategies.host_maintenance.HostMaintenance Requirements ------------ None. Metrics ******* None Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ==================================== parameter type default Value description ==================== ====== ==================================== ``maintenance_node`` String The name of the compute node which needs maintenance. Required. ``backup_node`` String The name of the compute node which will back up the maintenance node. Optional. ==================== ====== ==================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Host Maintenance Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/queens/approved/cluster-maintenance-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audit create \ -g cluster_maintaining -s host_maintenance \ -p maintenance_node=compute01 \ -p backup_node=compute02 \ --auto-trigger External Links -------------- None. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/index.rst0000664000175000017500000000010600000000000022732 0ustar00zuulzuul00000000000000Strategies ========== .. toctree:: :glob: :maxdepth: 1 ./* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/node_resource_consolidation.rst0000664000175000017500000000436300000000000027415 0ustar00zuulzuul00000000000000==================================== Node Resource Consolidation Strategy ==================================== Synopsis -------- **display name**: ``Node Resource Consolidation Strategy`` **goal**: ``server_consolidation`` .. watcher-term:: watcher.decision_engine.strategy.strategies.node_resource_consolidation.NodeResourceConsolidation Requirements ------------ None. Metrics ******* None Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. 
watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ======================================= parameter type default Value description ==================== ====== ======================================= ``host_choice`` String The way to select the destination node for server migration. The value ``auto`` means that the Nova scheduler selects the destination node, and ``specify`` means the strategy specifies the destination. ==================== ====== ======================================= Efficacy Indicator ------------------ None Algorithm --------- For more information on the Node Resource Consolidation Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/train/approved/node-resource-consolidation.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 server_consolidation \ --strategy node_resource_consolidation $ openstack optimize audit create \ -a at1 -p host_choice=auto External Links -------------- None. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/noisy_neighbor.rst0000664000175000017500000000461300000000000024650 0ustar00zuulzuul00000000000000============== Noisy neighbor ============== Synopsis -------- **display name**: ``Noisy Neighbor`` **goal**: ``noisy_neighbor`` .. watcher-term:: watcher.decision_engine.strategy.strategies.noisy_neighbor.NoisyNeighbor Requirements ------------ Metrics ******* The *noisy_neighbor* strategy requires the following metrics: ============================ ============ ======= ======================= metric service name plugins comment ============================ ============ ======= ======================= ``cpu_l3_cache`` ceilometer_ none Intel CMT_ is required ============================ ============ ======= ======================= .. _CMT: http://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ==================== ====== ============= ============================ parameter type default Value description ==================== ====== ============= ============================ ``cache_threshold`` Number 35.0 Threshold (%) of L3 cache performance drop that triggers a migration ==================== ====== ============= ============================ Efficacy Indicator ------------------ None Algorithm --------- For more information on the noisy neighbor strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/noisy_neighbor_strategy.html How to use it ? --------------- .. 
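note::

   ``cache_threshold`` is the percentage drop in L3 cache performance
   that triggers a migration, so ``-p cache_threshold=45.0`` in the
   sketch below raises the default trigger from 35% to 45%. The audit
   template name ``at1`` is a placeholder.

.. 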
code-block:: shell $ openstack optimize audittemplate create \ at1 noisy_neighbor --strategy noisy_neighbor $ openstack optimize audit create -a at1 \ -p cache_threshold=45.0 External Links -------------- None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/outlet_temp_control.rst0000664000175000017500000000534000000000000025731 0ustar00zuulzuul00000000000000================================= Outlet Temperature Based Strategy ================================= Synopsis -------- **display name**: ``Outlet temperature based strategy`` **goal**: ``thermal_optimization`` .. watcher-term:: watcher.decision_engine.strategy.strategies.outlet_temp_control Requirements ------------ This strategy has a dependency on the host having Intel's Power Node Manager 3.0 or later enabled. Metrics ******* The *outlet_temperature* strategy requires the following metrics: ========================================= ============ ======= ======= metric service name plugins comment ========================================= ============ ======= ======= ``hardware.ipmi.node.outlet_temperature`` ceilometer_ IPMI ========================================= ============ ======= ======= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#ipmi-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ============== ====== ============= ==================================== parameter type default Value description ============== ====== ============= ==================================== ``threshold`` Number 35.0 Temperature threshold for migration ``period`` Number 30 The time interval in seconds for getting statistic aggregation from metric data source ============== ====== ============= ==================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Outlet Temperature Based Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/outlet-temperature-based-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 thermal_optimization --strategy outlet_temperature $ openstack optimize audit create -a at1 -p threshold=31.0 External Links -------------- - `Intel Power Node Manager 3.0 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/saving_energy.rst0000664000175000017500000000513500000000000024472 0ustar00zuulzuul00000000000000====================== Saving Energy Strategy ====================== Synopsis -------- **display name**: ``Saving Energy Strategy`` **goal**: ``saving_energy`` .. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy.SavingEnergy Requirements ------------ This feature will use Ironic to do the power on/off actions, therefore this feature requires that the ironic component is configured. 
The compute nodes should also be managed by Ironic. Ironic installation: https://docs.openstack.org/ironic/latest/install/index.html Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``change_node_power_state`` - .. watcher-term:: watcher.applier.actions.change_node_power_state.ChangeNodePowerState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ====================== ====== ======= ====================================== parameter type default Value description ====================== ====== ======= ====================================== ``free_used_percent`` Number 10.0 A rational number describing the quotient min_free_hosts_num/nodes_with_VMs_num ``min_free_hosts_num`` Int 1 An integer describing the minimum number of free compute nodes ====================== ====== ======= ====================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Energy Saving Strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html How to use it ? --------------- Step 1: Add compute node info into Ironic node management .. code-block:: shell $ ironic node-create -d pxe_ipmitool -i ipmi_address=10.43.200.184 \ -i ipmi_username=root -i ipmi_password=nomoresecret -e compute_node_id=3 Step 2: Create an audit to run the optimization .. code-block:: shell $ openstack optimize audittemplate create \ saving_energy_template1 saving_energy --strategy saving_energy $ openstack optimize audit create -a saving_energy_audit1 \ -p free_used_percent=20.0 External Links -------------- None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/storage_capacity_balance.rst0000664000175000017500000000354500000000000026623 0ustar00zuulzuul00000000000000======================== Storage capacity balance ======================== Synopsis -------- **display name**: ``Storage Capacity Balance Strategy`` **goal**: ``workload_balancing`` .. watcher-term:: watcher.decision_engine.strategy.strategies.storage_capacity_balance.StorageCapacityBalance Requirements ------------ Metrics ******* None Cluster data model ****************** Storage cluster data model is required: .. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 25 35 :header-rows: 1 * - action - description * - ``volume_migrate`` - .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate Planner ******* Default Watcher's planner: .. 
watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ==================== ====== ============= ===================================== parameter type default Value description ==================== ====== ============= ===================================== ``volume_threshold`` Number 80.0 Volume threshold for capacity balance ==================== ====== ============= ===================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the storage capacity balance strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/storage-capacity-balance.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 workload_balancing --strategy storage_capacity_balance $ openstack optimize audit create -a at1 \ -p volume_threshold=85.0 External Links -------------- None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/strategy-template.rst0000664000175000017500000000540300000000000025303 0ustar00zuulzuul00000000000000============= Strategy name ============= Synopsis -------- **display name**: **goal**: Add here a complete description of your strategy Requirements ------------ Metrics ******* Write here the list of metrics required by your strategy algorithm (in the form of a table). If these metrics requires specific Telemetry plugin or other additional software, please explain here how to deploy them (and add link to dedicated installation guide). Example: ======================= ============ ======= ======= metric service name plugins comment ======================= ============ ======= ======= compute.node.* ceilometer_ none one point every 60s vm.cpu.utilization_perc monasca_ none power ceilometer_ kwapi_ one point every 60s ======================= ============ ======= ======= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _monasca: https://github.com/openstack/monasca-agent/blob/master/docs/Libvirt.md .. _kwapi: https://kwapi.readthedocs.io/en/latest/index.html Cluster data model ****************** Default Watcher's cluster data model. or If your strategy implementation requires a new cluster data model, please describe it in this section, with a link to model plugin's installation guide. Actions ******* Default Watcher's actions. or If your strategy implementation requires new actions, add the list of Action plugins here (in the form of a table) with a link to the plugin's installation procedure. ======== ================= action description ======== ================= action1_ This action1 ... action2_ This action2 ... ======== ================= .. _action1 : https://github.com/myrepo/watcher/plugins/action1 .. _action2 : https://github.com/myrepo/watcher/plugins/action2 Planner ******* Default Watcher's planner. or If your strategy requires also a new planner to schedule built actions in time, please describe it in this section, with a link to planner plugin's installation guide. Configuration ------------- If your strategy use configurable parameters, explain here how to tune them. Efficacy Indicator ------------------ Add here the Efficacy indicator computed by your strategy. Algorithm --------- Add here either the description of your algorithm or link to the existing description. How to use it ? --------------- .. 
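note::

   When filling in this section, show concrete commands as on the other
   strategy pages, typically an ``openstack optimize audittemplate
   create`` invocation bound to your strategy followed by an
   ``openstack optimize audit create``.

.. 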
code-block:: shell $ Write the command line to create an audit with your strategy. External Links -------------- If you have written papers, blog articles .... about your strategy into Watcher, or if your strategy is based from external publication(s), please add HTTP links and references in this section. - `link1 `_ - `link2 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/uniform_airflow.rst0000664000175000017500000000621400000000000025033 0ustar00zuulzuul00000000000000================================== Uniform Airflow Migration Strategy ================================== Synopsis -------- **display name**: ``Uniform airflow migration strategy`` **goal**: ``airflow_optimization`` .. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow.UniformAirflow Requirements ------------ This strategy has a dependency on the server having Intel's Power Node Manager 3.0 or later enabled. Metrics ******* The *uniform_airflow* strategy requires the following metrics: ================================== ============ ======= ======= metric service name plugins comment ================================== ============ ======= ======= ``hardware.ipmi.node.airflow`` ceilometer_ IPMI ``hardware.ipmi.node.temperature`` ceilometer_ IPMI ``hardware.ipmi.node.power`` ceilometer_ IPMI ================================== ============ ======= ======= .. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#ipmi-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ====================== ====== ============= =========================== parameter type default Value description ====================== ====== ============= =========================== ``threshold_airflow`` Number 400.0 Airflow threshold for migration Unit is 0.1CFM ``threshold_inlet_t`` Number 28.0 Inlet temperature threshold for migration decision ``threshold_power`` Number 350.0 System power threshold for migration decision ``period`` Number 300 Aggregate time period of ceilometer ====================== ====== ============= =========================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Uniform Airflow Migration Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/uniform-airflow-migration-strategy.html How to use it ? --------------- .. 
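note::

   Mind the units: ``threshold_airflow`` is expressed in units of
   0.1 CFM, so the value ``410`` in the example below means 41 CFM, and
   ``period`` should match your ceilometer aggregation interval. All
   parameter values shown are illustrative.

.. 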
code-block:: shell $ openstack optimize audittemplate create \ at1 airflow_optimization --strategy uniform_airflow $ openstack optimize audit create -a at1 -p threshold_airflow=410 \ -p threshold_inlet_t=29.0 -p threshold_power=355.0 -p period=310 External Links -------------- - `Intel Power Node Manager 3.0 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/vm_workload_consolidation.rst0000664000175000017500000000670700000000000027111 0ustar00zuulzuul00000000000000================================== VM Workload Consolidation Strategy ================================== Synopsis -------- **display name**: ``VM Workload Consolidation Strategy`` **goal**: ``vm_consolidation`` .. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation.VMWorkloadConsolidation Requirements ------------ Metrics ******* The *vm_workload_consolidation* strategy requires the following metrics: ============================ ============ ======= ========================= metric service name plugins comment ============================ ============ ======= ========================= ``cpu`` ceilometer_ none ``memory.resident`` ceilometer_ none ``memory`` ceilometer_ none ``disk.root.size`` ceilometer_ none ``compute.node.cpu.percent`` ceilometer_ none (optional) need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the nova.conf. ``hardware.memory.used`` ceilometer_ SNMP_ (optional) ============================ ============ ======= ========================= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _SNMP: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#snmp-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ====================== ====== ============= =================================== parameter type default Value description ====================== ====== ============= =================================== ``period`` Number 3600 The time interval in seconds for getting statistic aggregation from metric data source ====================== ====== ============= =================================== Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator Algorithm --------- For more information on the VM Workload consolidation strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html How to use it ? --------------- .. 
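note::

   The only documented parameter here is the optional ``period``; to
   override the default of 3600 seconds you could append, for example,
   ``-p period=7200`` (an illustrative value) to the audit creation
   command below.

.. 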
code-block:: shell $ openstack optimize audittemplate create \ at1 server_consolidation --strategy vm_workload_consolidation $ openstack optimize audit create -a at1 External Links -------------- *Spec URL* https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/workload-stabilization.rst0000664000175000017500000001354700000000000026334 0ustar00zuulzuul00000000000000============================================= Watcher Overload standard deviation algorithm ============================================= Synopsis -------- **display name**: ``Workload stabilization`` **goal**: ``workload_balancing`` .. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization.WorkloadStabilization Requirements ------------ Metrics ******* The *workload_stabilization* strategy requires the following metrics: ============================ ============ ======= ============================= metric service name plugins comment ============================ ============ ======= ============================= ``compute.node.cpu.percent`` ceilometer_ none need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the nova.conf. ``hardware.memory.used`` ceilometer_ SNMP_ ``cpu`` ceilometer_ none ``instance_ram_usage`` ceilometer_ none ============================ ============ ======= ============================= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _SNMP: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#snmp-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ===================== ============================= parameter type default Value description ==================== ====== ===================== ============================= ``metrics`` array |metrics| Metrics used as rates of cluster loads. ``thresholds`` object |thresholds| Dict where key is a metric and value is a trigger value. ``weights`` object |weights| These weights used to calculate common standard deviation. Name of weight contains meter name and _weight suffix. ``instance_metrics`` object |instance_metrics| Mapping to get hardware statistics using instance metrics. ``host_choice`` string retry Method of host's choice. There are cycle, retry and fullsearch methods. Cycle will iterate hosts in cycle. Retry will get some hosts random (count defined in retry_count option). Fullsearch will return each host from list. ``retry_count`` number 1 Count of random returned hosts. ``periods`` object |periods| These periods are used to get statistic aggregation for instance and host metrics. The period is simply a repeating interval of time into which the samples are grouped for aggregation. Watcher uses only the last period of all received ones. 
==================== ====== ===================== ============================= .. |metrics| replace:: ["instance_cpu_usage", "instance_ram_usage"] .. |thresholds| replace:: {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2} .. |weights| replace:: {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0} .. |instance_metrics| replace:: {"instance_cpu_usage": "compute.node.cpu.percent", "instance_ram_usage": "hardware.memory.used"} .. |periods| replace:: {"instance": 720, "node": 600} Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator Algorithm --------- You can find description of overload algorithm and role of standard deviation here: https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/sd-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 workload_balancing --strategy workload_stabilization $ openstack optimize audit create -a at1 \ -p thresholds='{"instance_ram_usage": 0.05}' \ -p metrics='["instance_ram_usage"]' External Links -------------- - `Watcher Overload standard deviation algorithm spec `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/workload_balance.rst0000664000175000017500000000541100000000000025116 0ustar00zuulzuul00000000000000=================================== Workload Balance Migration Strategy =================================== Synopsis -------- **display name**: ``Workload Balance Migration Strategy`` **goal**: ``workload_balancing`` .. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance Requirements ------------ None. Metrics ******* The *workload_balance* strategy requires the following metrics: ======================= ============ ======= ========================= metric service name plugins comment ======================= ============ ======= ========================= ``cpu`` ceilometer_ none ``memory.resident`` ceilometer_ none ======================= ============ ======= ========================= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ============== ====== ==================== ==================================== parameter type default Value description ============== ====== ==================== ==================================== ``metrics`` String 'instance_cpu_usage' Workload balance base on cpu or ram utilization. 
Choices: ['instance_cpu_usage', 'instance_ram_usage'] ``threshold`` Number 25.0 Workload threshold for migration ``period`` Number 300 Aggregate time period of ceilometer ============== ====== ==================== ==================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Workload Balance Migration Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/workload-balance-migration-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 workload_balancing --strategy workload_balance $ openstack optimize audit create -a at1 -p threshold=26.0 \ -p period=310 -p metrics=instance_cpu_usage External Links -------------- None. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/strategies/zone_migration.rst0000664000175000017500000001246400000000000024661 0ustar00zuulzuul00000000000000============== Zone migration ============== Synopsis -------- **display name**: ``Zone migration`` **goal**: ``hardware_maintenance`` .. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration Requirements ------------ Metrics ******* None Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Storage cluster data model is also required: .. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migrate`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``volume_migrate`` - .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ======================== ======== ============= ============================== parameter type default Value description ======================== ======== ============= ============================== ``compute_nodes`` array None Compute nodes to migrate. ``storage_pools`` array None Storage pools to migrate. ``parallel_total`` integer 6 The number of actions to be run in parallel in total. ``parallel_per_node`` integer 2 The number of actions to be run in parallel per compute node. ``parallel_per_pool`` integer 2 The number of actions to be run in parallel per storage pool. ``priority`` object None List prioritizes instances and volumes. ``with_attached_volume`` boolean False False: Instances will migrate after all volumes migrate. True: An instance will migrate after the attached volumes migrate. ======================== ======== ============= ============================== The elements of compute_nodes array are: ============= ======= =============== ============================= parameter type default Value description ============= ======= =============== ============================= ``src_node`` string None Compute node from which instances migrate(mandatory). ``dst_node`` string None Compute node to which instances migrate. 
============= ======= =============== ============================= The elements of storage_pools array are: ============= ======= =============== ============================== parameter type default Value description ============= ======= =============== ============================== ``src_pool`` string None Storage pool from which volumes migrate(mandatory). ``dst_pool`` string None Storage pool to which volumes migrate. ``src_type`` string None Source volume type(mandatory). ``dst_type`` string None Destination volume type (mandatory). ============= ======= =============== ============================== The elements of priority object are: ================ ======= =============== ====================== parameter type default Value description ================ ======= =============== ====================== ``project`` array None Project names. ``compute_node`` array None Compute node names. ``storage_pool`` array None Storage pool names. ``compute`` enum None Instance attributes. |compute| ``storage`` enum None Volume attributes. |storage| ================ ======= =============== ====================== .. |compute| replace:: ["vcpu_num", "mem_size", "disk_size", "created_at"] .. |storage| replace:: ["size", "created_at"] Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.HardwareMaintenance.get_global_efficacy_indicator Algorithm --------- For more information on the zone migration strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/zone-migration-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 hardware_maintenance --strategy zone_migration $ openstack optimize audit create -a at1 \ -p compute_nodes='[{"src_node": "s01", "dst_node": "d01"}]' External Links -------------- None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/doc/source/user/0000775000175000017500000000000000000000000017700 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/user/event_type_audit.rst0000664000175000017500000003044200000000000024005 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================== Audit using Aodh alarm ====================== Audit with EVENT type can be triggered by special alarm. This guide walks you through the steps to build an event-driven optimization solution by integrating Watcher with Ceilometer/Aodh. Step 1: Create an audit with EVENT type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is to create an audit with EVENT type, you can create an audit template firstly: .. code-block:: bash $ openstack optimize audittemplate create your_template_name \ --strategy or create an audit directly with special goal and strategy: .. 
code-block:: bash $ openstack optimize audit create --goal \ --strategy --audit_type EVENT This is an example for creating an audit with dummy strategy: .. code-block:: bash $ openstack optimize audit create --goal dummy \ --strategy dummy --audit_type EVENT +---------------+--------------------------------------+ | Field | Value | +---------------+--------------------------------------+ | UUID | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | | Name | dummy-2020-01-14T03:21:19.168467 | | Created At | 2020-01-14T03:21:19.200279+00:00 | | Updated At | None | | Deleted At | None | | State | PENDING | | Audit Type | EVENT | | Parameters | {u'para2': u'hello', u'para1': 3.2} | | Interval | None | | Goal | dummy | | Strategy | dummy | | Audit Scope | [] | | Auto Trigger | False | | Next Run Time | None | | Hostname | None | | Start Time | None | | End Time | None | | Force | False | +---------------+--------------------------------------+ We need to build Aodh action url using Watcher webhook API. For convenience we export the url into an environment variable: .. code-block:: bash $ export AUDIT_UUID=a3326a6a-c18e-4e8e-adba-d0c61ad404c5 $ export ALARM_URL="trust+http://localhost/infra-optim/v1/webhooks/$AUDIT_UUID" Step 2: Create Aodh Alarm ~~~~~~~~~~~~~~~~~~~~~~~~~ Once we have the audit created, we can continue to create Aodh alarm and set the alarm action to Watcher webhook API. The alarm type can be event( i.e. ``compute.instance.create.end``) or gnocchi_resources_threshold(i.e. ``cpu_util``), more info refer to alarm-creation_ For example: .. code-block:: bash $ openstack alarm create \ --type event --name instance_create \ --event-type "compute.instance.create.end" \ --enable True --repeat-actions False \ --alarm-action $ALARM_URL +---------------------------+------------------------------------------------------------------------------------------+ | Field | Value | +---------------------------+------------------------------------------------------------------------------------------+ | alarm_actions | [u'trust+http://localhost/infra-optim/v1/webhooks/a3326a6a-c18e-4e8e-adba-d0c61ad404c5'] | | alarm_id | b9e381fc-8e3e-4943-82ee-647e7a2ef644 | | description | Alarm when compute.instance.create.end event occurred. | | enabled | True | | event_type | compute.instance.create.end | | insufficient_data_actions | [] | | name | instance_create | | ok_actions | [] | | project_id | 728d66e18c914af1a41e2a585cf766af | | query | | | repeat_actions | False | | severity | low | | state | insufficient data | | state_reason | Not evaluated yet | | state_timestamp | 2020-01-14T03:56:26.894416 | | time_constraints | [] | | timestamp | 2020-01-14T03:56:26.894416 | | type | event | | user_id | 88c40156af7445cc80580a1e7e3ba308 | +---------------------------+------------------------------------------------------------------------------------------+ .. _alarm-creation: https://docs.openstack.org/aodh/latest/admin/telemetry-alarms.html#alarm-creation Step 3: Trigger the alarm ~~~~~~~~~~~~~~~~~~~~~~~~~ In this example, you can create a new instance to trigger the alarm. The alarm state will translate from ``insufficient data`` to ``alarm``. .. 
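note::

   Any event of type ``compute.instance.create.end`` will fire this
   alarm. A minimal sketch for producing one (the image and flavor
   names are placeholders for whatever exists in your cloud):

   .. code-block:: bash

      $ openstack server create --image cirros --flavor m1.tiny demo-vm

.. 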
code-block:: bash $ openstack alarm show b9e381fc-8e3e-4943-82ee-647e7a2ef644 +---------------------------+-------------------------------------------------------------------------------------------------------------------+ | Field | Value | +---------------------------+-------------------------------------------------------------------------------------------------------------------+ | alarm_actions | [u'trust+http://localhost/infra-optim/v1/webhooks/a3326a6a-c18e-4e8e-adba-d0c61ad404c5'] | | alarm_id | b9e381fc-8e3e-4943-82ee-647e7a2ef644 | | description | Alarm when compute.instance.create.end event occurred. | | enabled | True | | event_type | compute.instance.create.end | | insufficient_data_actions | [] | | name | instance_create | | ok_actions | [] | | project_id | 728d66e18c914af1a41e2a585cf766af | | query | | | repeat_actions | False | | severity | low | | state | alarm | | state_reason | Event hits the query . | | state_timestamp | 2020-01-14T03:56:26.894416 | | time_constraints | [] | | timestamp | 2020-01-14T06:17:40.350649 | | type | event | | user_id | 88c40156af7445cc80580a1e7e3ba308 | +---------------------------+-------------------------------------------------------------------------------------------------------------------+ Step 4: Verify the audit ~~~~~~~~~~~~~~~~~~~~~~~~ This can be verified to check if the audit state was ``SUCCEEDED``: .. code-block:: bash $ openstack optimize audit show a3326a6a-c18e-4e8e-adba-d0c61ad404c5 +---------------+--------------------------------------+ | Field | Value | +---------------+--------------------------------------+ | UUID | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | | Name | dummy-2020-01-14T03:21:19.168467 | | Created At | 2020-01-14T03:21:19+00:00 | | Updated At | 2020-01-14T06:26:40+00:00 | | Deleted At | None | | State | SUCCEEDED | | Audit Type | EVENT | | Parameters | {u'para2': u'hello', u'para1': 3.2} | | Interval | None | | Goal | dummy | | Strategy | dummy | | Audit Scope | [] | | Auto Trigger | False | | Next Run Time | None | | Hostname | ubuntudbs | | Start Time | None | | End Time | None | | Force | False | +---------------+--------------------------------------+ and you can use the following command to check if the action plan was created: .. code-block:: bash $ openstack optimize actionplan list --audit a3326a6a-c18e-4e8e-adba-d0c61ad404c5 +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ | UUID | Audit | State | Updated At | Global efficacy | +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ | 673b3fcb-8c16-4a41-9ee3-2956d9f6ca9e | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | RECOMMENDED | None | | +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/user/index.rst0000664000175000017500000000016500000000000021543 0ustar00zuulzuul00000000000000========== User Guide ========== .. toctree:: :maxdepth: 2 ways-to-install user-guide event_type_audit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/user/user-guide.rst0000664000175000017500000001354100000000000022507 0ustar00zuulzuul00000000000000.. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/user/index.rst0000664000175000017500000000016500000000000021543 0ustar00zuulzuul00000000000000==========
User Guide
==========

.. toctree::
   :maxdepth: 2

   ways-to-install
   user-guide
   event_type_audit
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/user/user-guide.rst0000664000175000017500000001354100000000000022507 0ustar00zuulzuul00000000000000..
      Except where otherwise noted, this document is licensed under Creative
      Commons Attribution 3.0 License. You can view the license at:

      https://creativecommons.org/licenses/by/3.0/

==================
Watcher User Guide
==================

See the `architecture page `_ for an architectural overview of the different
components of Watcher and how they fit together.

In this guide we're going to take you through the fundamentals of using
Watcher.

The following diagram shows the main interactions between the
:ref:`Administrator ` and the Watcher system:

.. image:: ../images/sequence_overview_watcher_usage.png
   :width: 100%

Getting started with Watcher
----------------------------

This guide assumes you have a working installation of Watcher. If you get
"*watcher: command not found*", you may have to verify your installation.
Please refer to the `installation guide`_.

In order to use Watcher, you have to configure credentials suitable for the
Watcher command-line tools.

You can interact with Watcher either by using our dedicated `Watcher CLI`_
named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``. If you
want to deploy Watcher in Horizon, please refer to the
`Watcher Horizon plugin installation guide`_.

.. note::

   In this guide we'll use the `OpenStack CLI`_ as the primary interface.
   Nevertheless, you can use the `Watcher CLI`_ in the same way, simply by
   replacing

   .. code:: bash

      $ openstack optimize ...

   with

   .. code:: bash

      $ watcher ...

.. _`installation guide`: https://docs.openstack.org/watcher/latest/install/
.. _`Watcher Horizon plugin installation guide`: https://docs.openstack.org/watcher-dashboard/latest/install/installation.html
.. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html
.. _`Watcher CLI`: https://docs.openstack.org/python-watcherclient/latest/cli/index.html

Watcher CLI Command
-------------------

We can see all of the commands available with the Watcher CLI by asking the
OpenStack CLI for help on the ``optimize`` command group:

.. code:: bash

   $ openstack help optimize

Running an audit of the cluster
-------------------------------

First, you need to find the :ref:`goal ` you want to achieve:

.. code:: bash

   $ openstack optimize goal list

.. note::

   If you get "*You must provide a username via either --os-username or via
   env[OS_USERNAME]*", you may have to verify your credentials.

Then, you can create an :ref:`audit template `. An :ref:`audit template `
defines an optimization :ref:`goal ` to achieve (i.e. the settings of your
audit):

.. code:: bash

   $ openstack optimize audittemplate create my_first_audit_template <goal>

Although optional, you may want to set a specific strategy for your audit
template. If so, you can search for its UUID or name using the following
command:

.. code:: bash

   $ openstack optimize strategy list --goal <goal>

You can use the following command to check the details of a strategy,
including which parameters it supports and in which format:

.. code:: bash

   $ openstack optimize strategy show <strategy>

The command to create your audit template would then be:

.. code:: bash

   $ openstack optimize audittemplate create my_first_audit_template <goal> \
     --strategy <strategy>
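As a concrete sketch, here is the create command with the built-in ``dummy``
goal and strategy (the template name is arbitrary, and the argument order
assumes the positional goal shown above; check ``openstack optimize goal
list`` for the goals actually available in your deployment):

.. code:: bash

   $ openstack optimize audittemplate create my_dummy_template dummy \
     --strategy dummy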
Then, you can create an audit. An audit is a request for optimizing your
cluster depending on the specified :ref:`goal `.

You can launch an audit on your cluster by referencing the
:ref:`audit template ` (i.e. the settings of your audit) that you want to
use.

- Get the :ref:`audit template ` UUID or name:

  .. code:: bash

     $ openstack optimize audittemplate list

- Start an audit based on this :ref:`audit template `'s settings:

  .. code:: bash

     $ openstack optimize audit create -a <your_audit_template>

If your audit template was created with ``--strategy <strategy>`` and the
strategy defines some parameters (use the ``watcher strategy show`` command
to check their format), you can append ``-p`` to pass the required
parameters:

.. code:: bash

   $ openstack optimize audit create -a <your_audit_template> \
     -p <param1>=5.5 -p <param2>=hi

Input parameters can cause the audit creation to fail when:

- there is no predefined strategy for the audit template
- the predefined strategy has no parameters spec
- the input parameters don't comply with the spec

The Watcher service will compute an :ref:`Action Plan ` composed of a list
of potential optimization :ref:`actions ` (instance migration, disabling of
a compute node, ...) according to the :ref:`goal ` to achieve.

- Wait until the Watcher audit has produced a new :ref:`action plan `, and
  get it:

  .. code:: bash

     $ openstack optimize actionplan list --audit <the_audit_uuid>

- Have a look at the list of optimization :ref:`actions ` contained in this
  new :ref:`action plan `:

  .. code:: bash

     $ openstack optimize action list --action-plan <the_action_plan_uuid>

Once you have learned how to create an :ref:`Action Plan `, it's time to go
further by applying it to your cluster:

- Execute the :ref:`action plan `:

  .. code:: bash

     $ openstack optimize actionplan start <the_action_plan_uuid>

You can follow the states of the :ref:`actions ` by periodically calling:

.. code:: bash

   $ openstack optimize action list --action-plan <the_action_plan_uuid>

You can also obtain more detailed information about a specific action:

.. code:: bash

   $ openstack optimize action show <the_action_uuid>
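To follow a running action plan continuously, a plain shell ``watch`` loop
(standard coreutils, not a Watcher feature) re-runs the listing every few
seconds:

.. code:: bash

   $ watch -n 10 openstack optimize action list \
     --action-plan <the_action_plan_uuid>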
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/doc/source/user/ways-to-install.rst0000664000175000017500000001051300000000000023501 0ustar00zuulzuul00000000000000..
      Except where otherwise noted, this document is licensed under Creative
      Commons Attribution 3.0 License. You can view the license at:

      https://creativecommons.org/licenses/by/3.0/

=======================
Ways to install Watcher
=======================

This document describes some ways to install Watcher in order to use it.

If you are intending to develop on or with Watcher, please read
:doc:`../contributor/environment`.

Prerequisites
-------------

The source install instructions specifically avoid using platform-specific
packages, instead using the source for the code and the Python Package Index
(PyPI_).

.. _PyPI: https://pypi.org/

It's expected that your system already has python2.7_, the latest version of
pip_, and git_ available.

.. _python2.7: https://www.python.org
.. _pip: https://pip.pypa.io/en/latest/installing/
.. _git: https://git-scm.com/

Your system will also need some additional system libraries.

On Ubuntu (tested on 16.04 LTS):

.. code-block:: bash

   $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev

On Fedora-based distributions, e.g. Fedora/RHEL/CentOS/Scientific Linux
(tested on CentOS 7.1):

.. code-block:: bash

   $ sudo yum install gcc python-devel openssl-devel libffi-devel mysql-devel

Installing from Source
----------------------

Clone the Watcher repository:

.. code-block:: bash

   $ git clone https://opendev.org/openstack/watcher.git
   $ cd watcher

Install the Watcher modules:

.. code-block:: bash

   # python setup.py install

The following commands should then be available on the command-line path:

* ``watcher-api`` - the Watcher web service, used to handle RESTful requests
* ``watcher-decision-engine`` - the Watcher Decision Engine, used to build
  action plans according to the optimization goals to achieve
* ``watcher-applier`` - the Watcher Applier module, used to apply action
  plans
* ``watcher-db-manage`` - used to bootstrap Watcher data

You will find sample configuration files in ``etc/watcher``:

* ``watcher.conf.sample``

Install the Watcher module dependencies:

.. code-block:: bash

   # pip install -r requirements.txt

From here, refer to :doc:`../configuration/configuring` to declare Watcher as
a new service in Keystone and to configure its different modules. Once
configured, you should be able to run the Watcher services by issuing these
commands:

.. code-block:: bash

   $ watcher-api
   $ watcher-decision-engine
   $ watcher-applier

By default, this will show logging on the console from which it was started.
Once started, you can use the `Watcher Client`_ to play with the Watcher
service.

.. _`Watcher Client`: https://opendev.org/openstack/python-watcherclient

Installing from packages: PyPI
------------------------------

The Watcher package is available in the PyPI repository. To install Watcher
on your system:

.. code-block:: bash

   $ sudo pip install python-watcher

The Watcher services, along with their dependencies, should then be
automatically installed on your system. Once installed, you still need to
declare Watcher as a new service in Keystone and to configure its different
modules, which you can find described in :doc:`../configuration/configuring`.

Installing from packages: Debian (experimental)
-----------------------------------------------

Experimental Debian packages are available in the `Debian repositories`_.
The best way to use them is to install them into a Docker_ container. Here
is a simple Dockerfile snippet you can use to build such a container:

.. code-block:: bash

   FROM debian:experimental
   MAINTAINER David TARDIVEL

   RUN apt-get update
   RUN apt-get dist-upgrade -y
   RUN apt-get install -y vim net-tools
   RUN apt-get install -y -t experimental watcher-api

   CMD ["/usr/bin/watcher-api"]

Build your container from this Dockerfile:

.. code-block:: bash

   $ docker build -t watcher/api .

To run your container, execute this command:

.. code-block:: bash

   $ docker run -d -p 9322:9322 watcher/api

Check in the logs that the Watcher API has started:

.. code-block:: bash

   $ docker logs <container_id>

You can run similar containers with the Watcher Decision Engine (package
``watcher-decision-engine``) and with the Watcher Applier (package
``watcher-applier``); see the sketch below.

.. _Docker: https://www.docker.com/
.. _`Debian repositories`: https://packages.debian.org/experimental/allpackages
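For example, a Decision Engine container could be built from an almost
identical Dockerfile. This is an untested sketch, assuming only the package
and entry point change:

.. code-block:: bash

   FROM debian:experimental
   # same pattern as the API container, swapping in the decision engine package
   RUN apt-get update && apt-get dist-upgrade -y
   RUN apt-get install -y -t experimental watcher-decision-engine
   CMD ["/usr/bin/watcher-decision-engine"]

Build and run it the same way, e.g. ``docker build -t watcher/decision-engine .``
followed by ``docker run -d watcher/decision-engine``.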
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.5991352 python_watcher-14.0.0/etc/0000775000175000017500000000000000000000000015430 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/etc/apache2/0000775000175000017500000000000000000000000016733 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/etc/apache2/watcher0000664000175000017500000000224600000000000020317 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is an example Apache2 configuration file for using
# Watcher API through mod_wsgi

Listen 9322

<VirtualHost *:9322>
    WSGIDaemonProcess watcher-api user=stack group=stack processes=2 threads=2 display-name=%{GROUP}
    WSGIScriptAlias / /usr/local/bin/watcher-api-wsgi
    WSGIProcessGroup watcher-api

    ErrorLog /var/log/httpd/watcher_error.log
    LogLevel info
    CustomLog /var/log/httpd/watcher_access.log combined
</VirtualHost>

<Directory /usr/local/bin>
    WSGIProcessGroup watcher-api
    WSGIApplicationGroup %{GLOBAL}
    AllowOverride All
    Require all granted
</Directory>
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/etc/watcher/0000775000175000017500000000000000000000000017065 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/etc/watcher/README-watcher.conf.txt0000664000175000017500000000020300000000000023145 0ustar00zuulzuul00000000000000To generate the sample watcher.conf file, run the following command from the
top level of the watcher directory:

    tox -e genconfig
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/etc/watcher/oslo-config-generator/0000775000175000017500000000000000000000000023270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/etc/watcher/oslo-config-generator/watcher.conf0000664000175000017500000000062600000000000025600 0ustar00zuulzuul00000000000000[DEFAULT]
output_file = etc/watcher/watcher.conf.sample
wrap_width = 79
namespace = watcher
namespace = keystonemiddleware.auth_token
namespace = oslo.cache
namespace = oslo.concurrency
namespace = oslo.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
namespace = oslo.reports
namespace = oslo.service.periodic_task
namespace = oslo.service.service
namespace = oslo.service.wsgi
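Note: the ``genconfig`` tox environment referenced in the README above is, in
the usual OpenStack layout, a thin wrapper around oslo-config-generator, so
the same sample file can also be produced directly (assuming oslo.config is
installed):

    oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf

The namespaces listed in the config above determine which options appear in
the generated watcher.conf.sample.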
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/etc/watcher/oslo-policy-generator/0000775000175000017500000000000000000000000023322
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/etc/watcher/oslo-policy-generator/watcher-policy-generator.conf0000664000175000017500000000011400000000000031103 0ustar00zuulzuul00000000000000[DEFAULT]
output_file = /etc/watcher/policy.yaml.sample
namespace = watcher
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/playbooks/0000775000175000017500000000000000000000000016660 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/playbooks/generate_prometheus_config.yml0000664000175000017500000000034200000000000024774 0ustar00zuulzuul00000000000000---
- hosts: all
  tasks:
    - name: Generate prometheus.yml config file
      delegate_to: controller
      template:
        src: "templates/prometheus.yml.j2"
        dest: "/home/zuul/prometheus.yml"
        mode: "0644"
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6151352 python_watcher-14.0.0/playbooks/templates/0000775000175000017500000000000000000000000020656 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/playbooks/templates/prometheus.yml.j20000664000175000017500000000053400000000000024110 0ustar00zuulzuul00000000000000global:
  scrape_interval: 10s

scrape_configs:
  - job_name: "node"
    static_configs:
      - targets: ["localhost:3000"]
{% if 'compute' in groups %}
{% for host in groups['compute'] %}
      - targets: ["{{ hostvars[host]['ansible_fqdn'] }}:9100"]
        labels:
          fqdn: "{{ hostvars[host]['ansible_fqdn'] }}"
{% endfor %}
{% endif %}
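Note: the rendered /home/zuul/prometheus.yml can be sanity-checked before
Prometheus consumes it, assuming the Prometheus toolchain is installed on
the node:

    promtool check config /home/zuul/prometheus.yml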
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6591353 python_watcher-14.0.0/python_watcher.egg-info/0000775000175000017500000000000000000000000021405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/PKG-INFO0000644000175000017500000000742600000000000022511 0ustar00zuulzuul00000000000000Metadata-Version: 2.1
Name: python-watcher
Version: 14.0.0
Summary: OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds.
Home-page: https://docs.openstack.org/watcher/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.9
License-File: LICENSE
Requires-Dist: apscheduler>=3.5.1
Requires-Dist: eventlet>=0.27.0
Requires-Dist: jsonpatch>=1.21
Requires-Dist: keystoneauth1>=3.4.0
Requires-Dist: jsonschema>=3.2.0
Requires-Dist: keystonemiddleware>=4.21.0
Requires-Dist: lxml>=4.5.1
Requires-Dist: croniter>=0.3.20
Requires-Dist: os-resource-classes>=0.4.0
Requires-Dist: oslo.concurrency>=3.26.0
Requires-Dist: oslo.cache>=1.29.0
Requires-Dist: oslo.config>=6.8.0
Requires-Dist: oslo.context>=2.21.0
Requires-Dist: oslo.db>=4.44.0
Requires-Dist: oslo.i18n>=3.20.0
Requires-Dist: oslo.log>=3.37.0
Requires-Dist: oslo.messaging>=14.1.0
Requires-Dist: oslo.policy>=4.5.0
Requires-Dist: oslo.reports>=1.27.0
Requires-Dist: oslo.serialization>=2.25.0
Requires-Dist: oslo.service>=1.30.0
Requires-Dist: oslo.upgradecheck>=1.3.0
Requires-Dist: oslo.utils>=7.0.0
Requires-Dist: oslo.versionedobjects>=1.32.0
Requires-Dist: PasteDeploy>=1.5.2
Requires-Dist: pbr>=3.1.1
Requires-Dist: pecan>=1.3.2
Requires-Dist: PrettyTable>=0.7.2
Requires-Dist: gnocchiclient>=7.0.1
Requires-Dist: python-cinderclient>=3.5.0
Requires-Dist: python-glanceclient>=2.9.1
Requires-Dist: python-keystoneclient>=3.15.0
Requires-Dist: python-monascaclient>=1.12.0
Requires-Dist: python-neutronclient>=6.7.0
Requires-Dist: python-novaclient>=14.1.0
Requires-Dist: python-observabilityclient>=0.3.0
Requires-Dist: python-openstackclient>=3.14.0
Requires-Dist: python-ironicclient>=2.5.0
Requires-Dist: SQLAlchemy>=1.2.5
Requires-Dist: stevedore>=1.28.0
Requires-Dist: taskflow>=3.8.0
Requires-Dist: WebOb>=1.8.5
Requires-Dist: WSME>=0.9.2
Requires-Dist: networkx>=2.4
Requires-Dist: microversion_parse>=0.2.1
Requires-Dist: futurist>=1.8.0

=======
Watcher
=======

.. image:: https://governance.openstack.org/tc/badges/watcher.svg
    :target: https://governance.openstack.org/tc/reference/tags/index.html

.. Change things from this point on

.. Except where otherwise noted, this document is licensed under Creative
   Commons Attribution 3.0 License. You can view the license at:

   https://creativecommons.org/licenses/by/3.0/

OpenStack Watcher provides a flexible and scalable resource optimization
service for multi-tenant OpenStack-based clouds. Watcher provides a robust
framework to realize a wide range of cloud optimization goals, including the
reduction of data center operating costs, increased system performance via
intelligent virtual machine migration, increased energy efficiency and more!
* Free software: Apache license * Wiki: https://wiki.openstack.org/wiki/Watcher * Source: https://opendev.org/openstack/watcher * Bugs: https://bugs.launchpad.net/watcher * Documentation: https://docs.openstack.org/watcher/latest/ * Release notes: https://docs.openstack.org/releasenotes/watcher/ * Design specifications: https://specs.openstack.org/openstack/watcher-specs/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/SOURCES.txt0000664000175000017500000012300000000000000023265 0ustar00zuulzuul00000000000000.coveragerc .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/parameters.yaml api-ref/source/watcher-api-v1-actionplans.inc api-ref/source/watcher-api-v1-actions.inc api-ref/source/watcher-api-v1-audits.inc api-ref/source/watcher-api-v1-audittemplates.inc api-ref/source/watcher-api-v1-datamodel.inc api-ref/source/watcher-api-v1-goals.inc api-ref/source/watcher-api-v1-scoring_engines.inc api-ref/source/watcher-api-v1-services.inc api-ref/source/watcher-api-v1-strategies.inc api-ref/source/watcher-api-v1-webhooks.inc api-ref/source/watcher-api-versions.inc api-ref/source/samples/actionplan-cancel-request-cancelling.json api-ref/source/samples/actionplan-cancel-request-pending.json api-ref/source/samples/actionplan-list-detailed-response.json api-ref/source/samples/actionplan-list-response.json api-ref/source/samples/actionplan-show-response.json api-ref/source/samples/actionplan-start-response.json api-ref/source/samples/actions-list-detailed-response.json api-ref/source/samples/actions-list-response.json api-ref/source/samples/actions-show-response.json api-ref/source/samples/api-root-response.json api-ref/source/samples/api-v1-root-response.json api-ref/source/samples/audit-cancel-request.json api-ref/source/samples/audit-cancel-response.json api-ref/source/samples/audit-create-request-continuous.json api-ref/source/samples/audit-create-request-oneshot.json api-ref/source/samples/audit-create-response.json api-ref/source/samples/audit-list-detailed-response.json api-ref/source/samples/audit-list-response.json api-ref/source/samples/audit-show-response.json api-ref/source/samples/audit-update-request.json api-ref/source/samples/audit-update-response.json api-ref/source/samples/audittemplate-create-request-full.json api-ref/source/samples/audittemplate-create-request-minimal.json api-ref/source/samples/audittemplate-create-response.json api-ref/source/samples/audittemplate-list-detailed-response.json api-ref/source/samples/audittemplate-list-response.json api-ref/source/samples/audittemplate-show-response.json api-ref/source/samples/audittemplate-update-request.json api-ref/source/samples/audittemplate-update-response.json api-ref/source/samples/datamodel-list-response.json api-ref/source/samples/goal-list-response.json api-ref/source/samples/goal-show-response.json api-ref/source/samples/scoring_engine-list-detailed-response.json api-ref/source/samples/scoring_engine-list-response.json api-ref/source/samples/scoring_engine-show-response.json api-ref/source/samples/service-list-detailed-response.json api-ref/source/samples/service-list-response.json api-ref/source/samples/service-show-response.json api-ref/source/samples/strategy-list-detailed-response.json 
api-ref/source/samples/strategy-list-response.json api-ref/source/samples/strategy-show-response.json api-ref/source/samples/strategy-state-response.json devstack/local.conf.compute devstack/local.conf.controller devstack/override-defaults devstack/plugin.sh devstack/settings devstack/files/apache-watcher-api.template devstack/lib/watcher devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh devstack/upgrade/from_rocky/upgrade-watcher doc/dictionary.txt doc/requirements.txt doc/ext/__init__.py doc/ext/term.py doc/ext/versioned_notifications.py doc/notification_samples/action-cancel-end.json doc/notification_samples/action-cancel-error.json doc/notification_samples/action-cancel-start.json doc/notification_samples/action-create.json doc/notification_samples/action-delete.json doc/notification_samples/action-execution-end.json doc/notification_samples/action-execution-error.json doc/notification_samples/action-execution-start.json doc/notification_samples/action-update.json doc/notification_samples/action_plan-cancel-end.json doc/notification_samples/action_plan-cancel-error.json doc/notification_samples/action_plan-cancel-start.json doc/notification_samples/action_plan-create.json doc/notification_samples/action_plan-delete.json doc/notification_samples/action_plan-execution-end.json doc/notification_samples/action_plan-execution-error.json doc/notification_samples/action_plan-execution-start.json doc/notification_samples/action_plan-update.json doc/notification_samples/audit-create.json doc/notification_samples/audit-delete.json doc/notification_samples/audit-planner-end.json doc/notification_samples/audit-planner-error.json doc/notification_samples/audit-planner-start.json doc/notification_samples/audit-strategy-end.json doc/notification_samples/audit-strategy-error.json doc/notification_samples/audit-strategy-start.json doc/notification_samples/audit-update.json doc/notification_samples/infra-optim-exception.json doc/notification_samples/service-update.json doc/source/architecture.rst doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/_static/.placeholder doc/source/admin/apache-mod-wsgi.rst doc/source/admin/gmr.rst doc/source/admin/index.rst doc/source/admin/policy.rst doc/source/configuration/configuring.rst doc/source/configuration/index.rst doc/source/configuration/watcher.rst doc/source/contributor/api_microversion_history.rst doc/source/contributor/concurrency.rst doc/source/contributor/contributing.rst doc/source/contributor/devstack.rst doc/source/contributor/environment.rst doc/source/contributor/index.rst doc/source/contributor/notifications.rst doc/source/contributor/rally_link.rst doc/source/contributor/testing.rst doc/source/contributor/plugin/action-plugin.rst doc/source/contributor/plugin/base-setup.rst doc/source/contributor/plugin/cdmc-plugin.rst doc/source/contributor/plugin/goal-plugin.rst doc/source/contributor/plugin/index.rst doc/source/contributor/plugin/planner-plugin.rst doc/source/contributor/plugin/plugins.rst doc/source/contributor/plugin/scoring-engine-plugin.rst doc/source/contributor/plugin/strategy-plugin.rst doc/source/datasources/grafana.rst doc/source/datasources/index.rst doc/source/datasources/prometheus.rst doc/source/image_src/dia/architecture.dia doc/source/image_src/dia/functional_data_model.dia doc/source/image_src/plantuml/README.rst doc/source/image_src/plantuml/action_plan_state_machine.txt doc/source/image_src/plantuml/audit_state_machine.txt 
doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt doc/source/image_src/plantuml/sequence_create_audit_template.txt doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt doc/source/image_src/plantuml/sequence_launch_action_plan.txt doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt doc/source/image_src/plantuml/watcher_db_schema_diagram.txt doc/source/images/action_plan_state_machine.png doc/source/images/architecture.svg doc/source/images/audit_state_machine.png doc/source/images/functional_data_model.svg doc/source/images/sequence_architecture_cdmc_sync.png doc/source/images/sequence_create_and_launch_audit.png doc/source/images/sequence_create_audit_template.png doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png doc/source/images/sequence_launch_action_plan.png doc/source/images/sequence_launch_action_plan_in_applier.png doc/source/images/sequence_overview_watcher_usage.png doc/source/images/sequence_trigger_audit_in_decision_engine.png doc/source/images/watcher_db_schema_diagram.png doc/source/install/common_configure.rst doc/source/install/common_prerequisites.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/man/footer.rst doc/source/man/general-options.rst doc/source/man/index.rst doc/source/man/watcher-api.rst doc/source/man/watcher-applier.rst doc/source/man/watcher-db-manage.rst doc/source/man/watcher-decision-engine.rst doc/source/man/watcher-status.rst doc/source/strategies/actuation.rst doc/source/strategies/basic-server-consolidation.rst doc/source/strategies/host_maintenance.rst doc/source/strategies/index.rst doc/source/strategies/node_resource_consolidation.rst doc/source/strategies/noisy_neighbor.rst doc/source/strategies/outlet_temp_control.rst doc/source/strategies/saving_energy.rst doc/source/strategies/storage_capacity_balance.rst doc/source/strategies/strategy-template.rst doc/source/strategies/uniform_airflow.rst doc/source/strategies/vm_workload_consolidation.rst doc/source/strategies/workload-stabilization.rst doc/source/strategies/workload_balance.rst doc/source/strategies/zone_migration.rst doc/source/user/event_type_audit.rst doc/source/user/index.rst doc/source/user/user-guide.rst doc/source/user/ways-to-install.rst etc/apache2/watcher etc/watcher/README-watcher.conf.txt etc/watcher/oslo-config-generator/watcher.conf etc/watcher/oslo-policy-generator/watcher-policy-generator.conf playbooks/generate_prometheus_config.yml playbooks/templates/prometheus.yml.j2 python_watcher.egg-info/PKG-INFO python_watcher.egg-info/SOURCES.txt python_watcher.egg-info/dependency_links.txt python_watcher.egg-info/entry_points.txt python_watcher.egg-info/not-zip-safe python_watcher.egg-info/pbr.json python_watcher.egg-info/requires.txt python_watcher.egg-info/top_level.txt rally-jobs/README.rst rally-jobs/watcher-watcher.yaml releasenotes/notes/.placeholder releasenotes/notes/2025.1-prelude-8be97eece4e1d1ff.yaml releasenotes/notes/action-plan-cancel-c54726378019e096.yaml releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml 
releasenotes/notes/action-versioned-notifications-api-ff94fc0f401292d0.yaml releasenotes/notes/add-baremetal-scoper-9ef23f5fb8f0be6a.yaml releasenotes/notes/add-force-field-to-audit-4bcaeedfe27233ad.yaml releasenotes/notes/add-ha-support-b9042255e5b76e42.yaml releasenotes/notes/add-instance-metrics-to-prometheus-datasource-9fba8c174ff845e1.yaml releasenotes/notes/add-name-for-audit-0df1f39f00736f06.yaml releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml releasenotes/notes/add-start-end-time-for-continuous-audit-52c45052cb06d153.yaml releasenotes/notes/add-upgrade-check-framework-5bb9693c8a78931c.yaml releasenotes/notes/api-call-retry-fef741ac684c58dd.yaml releasenotes/notes/api-microversioning-7999a3ee8073bf32.yaml releasenotes/notes/audit-scoper-for-storage-data-model-cdccc803542d22db.yaml releasenotes/notes/audit-tag-vm-metadata-47a3e4468748853c.yaml releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml releasenotes/notes/background-jobs-ha-9d3cf3fe356f4705.yaml releasenotes/notes/bp-audit-scope-exclude-project-511a7720aac00dff.yaml releasenotes/notes/build-baremetal-data-model-in-watcher-3023453a47b61dab.yaml releasenotes/notes/cdm-scoping-8d9c307bad46bfa1.yaml releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml releasenotes/notes/change-ram-util-metric-4a3e6984b9dd968d.yaml releasenotes/notes/check-strategy-requirements-66f9e9262412f8ec.yaml releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml releasenotes/notes/compute-cdm-include-all-instances-f7506ded2d57732f.yaml releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml releasenotes/notes/consume-nova-versioned-notifications-f98361b37e546b4d.yaml releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml releasenotes/notes/cron-based-continuous-audits-c3eedf28d9752b37.yaml releasenotes/notes/datasource-query-retry-00cba5f7e68aec39.yaml releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml releasenotes/notes/deprecate-ceilometer-datasource-446b0be70fbce28b.yaml releasenotes/notes/deprecate-json-formatted-policy-file-3a92379e9f5dd203.yaml releasenotes/notes/deprecate-monasca-ds-9065f4d4bee09ab2.yaml releasenotes/notes/drop-py-2-7-54f8e806d71f19a7.yaml releasenotes/notes/drop-python38-support-eeb19a0bc0160sw1.yaml releasenotes/notes/dynamic-action-description-0e947b9e7ef2a134.yaml releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml releasenotes/notes/enhance-watcher-applier-engine-86c676ce8f179e68.yaml releasenotes/notes/event-driven-optimization-based-4870f112bef8a560.yaml releasenotes/notes/file-based-metric-map-c2af62b5067895df.yaml releasenotes/notes/formal-datasource-interface-implementation-222769d55a127d33.yaml releasenotes/notes/general-purpose-decision-engine-threadpool-0711b23abfc9d409.yaml releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml releasenotes/notes/global-datasource-preference-3ab47b4be09ff3a5.yaml releasenotes/notes/gnocchi-watcher-43c25d391fbd3e9c.yaml releasenotes/notes/grafana-datasource-b672367c23ffa0c6.yaml releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml releasenotes/notes/host-maintenance-strategy-41f640927948fb56.yaml 
releasenotes/notes/improve-compute-data-model-b427c85e4ed2b6fb.yaml releasenotes/notes/jsonschema-validation-79cab05d5295da00.yaml releasenotes/notes/min-required-nova-train-71f124192d88ae52.yaml releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml releasenotes/notes/multiple-global-efficacy-indicator-fc11c4844a12a7d5.yaml releasenotes/notes/node-resource-consolidation-73bc0c0abfeb0b03.yaml releasenotes/notes/noisy-neighbor-strategy-a71342740b59dddc.yaml releasenotes/notes/notifications-actionplan-cancel-edb2a4a12543e2d0.yaml releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml releasenotes/notes/prometheus-datasource-e56f2f7b8f3427c2.yaml releasenotes/notes/remove-ceilometer-datasource-8d9ab7d64d61e405.yaml releasenotes/notes/remove-nova-legacy-notifications-e1b6d10eff58f30a.yaml releasenotes/notes/replace-cold-migrate-to-use-nova-migration-api-cecd9a39ddd3bc58.yaml releasenotes/notes/scope-for-data-model-ea9792f90db14343.yaml releasenotes/notes/service-versioned-notifications-api-70367b79a565d900.yaml releasenotes/notes/show-datamodel-api-6945b744fd5d25d5.yaml releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml releasenotes/notes/storage-workload-balance-0ecabbc1791e6894.yaml releasenotes/notes/support-keystoneclient-option-b30d1ff45f86a2e7.yaml releasenotes/notes/support-placement-api-58ce6bef1bbbe98a.yaml releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml releasenotes/notes/uwsgi-support-8dcea6961e56dad0.yaml releasenotes/notes/volume-migrate-action-fc57b0ce0e4c39ae.yaml releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml releasenotes/notes/watcher-planner-selector-84d77549d46f362a.yaml releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml releasenotes/notes/workload-balance-base-on-cpu-or-ram-util-3ff4ee968c32b2ed.yaml releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml releasenotes/notes/zone-migration-strategy-10f7656a2a01e607.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po watcher/__init__.py watcher/_i18n.py watcher/eventlet.py watcher/version.py watcher/api/__init__.py watcher/api/acl.py watcher/api/app.py watcher/api/app.wsgi watcher/api/config.py watcher/api/hooks.py watcher/api/scheduling.py watcher/api/wsgi.py watcher/api/controllers/__init__.py watcher/api/controllers/base.py watcher/api/controllers/link.py watcher/api/controllers/rest_api_version_history.rst watcher/api/controllers/root.py watcher/api/controllers/v1/__init__.py 
watcher/api/controllers/v1/action.py watcher/api/controllers/v1/action_plan.py watcher/api/controllers/v1/audit.py watcher/api/controllers/v1/audit_template.py watcher/api/controllers/v1/collection.py watcher/api/controllers/v1/data_model.py watcher/api/controllers/v1/efficacy_indicator.py watcher/api/controllers/v1/goal.py watcher/api/controllers/v1/scoring_engine.py watcher/api/controllers/v1/service.py watcher/api/controllers/v1/strategy.py watcher/api/controllers/v1/types.py watcher/api/controllers/v1/utils.py watcher/api/controllers/v1/versions.py watcher/api/controllers/v1/webhooks.py watcher/api/middleware/__init__.py watcher/api/middleware/auth_token.py watcher/api/middleware/parsable_error.py watcher/applier/__init__.py watcher/applier/base.py watcher/applier/default.py watcher/applier/manager.py watcher/applier/rpcapi.py watcher/applier/sync.py watcher/applier/action_plan/__init__.py watcher/applier/action_plan/base.py watcher/applier/action_plan/default.py watcher/applier/actions/__init__.py watcher/applier/actions/base.py watcher/applier/actions/change_node_power_state.py watcher/applier/actions/change_nova_service_state.py watcher/applier/actions/factory.py watcher/applier/actions/migration.py watcher/applier/actions/nop.py watcher/applier/actions/resize.py watcher/applier/actions/sleep.py watcher/applier/actions/volume_migration.py watcher/applier/loading/__init__.py watcher/applier/loading/default.py watcher/applier/messaging/__init__.py watcher/applier/messaging/trigger.py watcher/applier/workflow_engine/__init__.py watcher/applier/workflow_engine/base.py watcher/applier/workflow_engine/default.py watcher/cmd/__init__.py watcher/cmd/api.py watcher/cmd/applier.py watcher/cmd/dbmanage.py watcher/cmd/decisionengine.py watcher/cmd/status.py watcher/cmd/sync.py watcher/common/__init__.py watcher/common/cinder_helper.py watcher/common/clients.py watcher/common/config.py watcher/common/context.py watcher/common/exception.py watcher/common/ironic_helper.py watcher/common/keystone_helper.py watcher/common/nova_helper.py watcher/common/paths.py watcher/common/placement_helper.py watcher/common/policy.py watcher/common/rpc.py watcher/common/scheduling.py watcher/common/service.py watcher/common/service_manager.py watcher/common/utils.py watcher/common/loader/__init__.py watcher/common/loader/base.py watcher/common/loader/default.py watcher/common/loader/loadable.py watcher/common/metal_helper/__init__.py watcher/common/metal_helper/base.py watcher/common/metal_helper/constants.py watcher/common/metal_helper/factory.py watcher/common/metal_helper/ironic.py watcher/common/metal_helper/maas.py watcher/common/policies/__init__.py watcher/common/policies/action.py watcher/common/policies/action_plan.py watcher/common/policies/audit.py watcher/common/policies/audit_template.py watcher/common/policies/base.py watcher/common/policies/data_model.py watcher/common/policies/goal.py watcher/common/policies/scoring_engine.py watcher/common/policies/service.py watcher/common/policies/strategy.py watcher/conf/__init__.py watcher/conf/api.py watcher/conf/applier.py watcher/conf/cinder_client.py watcher/conf/clients_auth.py watcher/conf/collector.py watcher/conf/datasources.py watcher/conf/db.py watcher/conf/decision_engine.py watcher/conf/exception.py watcher/conf/glance_client.py watcher/conf/gnocchi_client.py watcher/conf/grafana_client.py watcher/conf/grafana_translators.py watcher/conf/ironic_client.py watcher/conf/keystone_client.py watcher/conf/maas_client.py watcher/conf/monasca_client.py 
watcher/conf/neutron_client.py watcher/conf/nova_client.py watcher/conf/opts.py watcher/conf/paths.py watcher/conf/placement_client.py watcher/conf/planner.py watcher/conf/plugins.py watcher/conf/prometheus_client.py watcher/conf/service.py watcher/db/__init__.py watcher/db/api.py watcher/db/migration.py watcher/db/purge.py watcher/db/sqlalchemy/__init__.py watcher/db/sqlalchemy/alembic.ini watcher/db/sqlalchemy/api.py watcher/db/sqlalchemy/job_store.py watcher/db/sqlalchemy/migration.py watcher/db/sqlalchemy/models.py watcher/db/sqlalchemy/alembic/README.rst watcher/db/sqlalchemy/alembic/env.py watcher/db/sqlalchemy/alembic/script.py.mako watcher/db/sqlalchemy/alembic/versions/001_ocata.py watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py watcher/db/sqlalchemy/alembic/versions/3cfc94cecf4e_add_name_for_audit.py watcher/db/sqlalchemy/alembic/versions/4b16194c56bc_add_start_end_time.py watcher/db/sqlalchemy/alembic/versions/52804f2498c4_add_hostname.py watcher/db/sqlalchemy/alembic/versions/609bec748f2a_add_force_field.py watcher/db/sqlalchemy/alembic/versions/a86240e89a29_.py watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py watcher/db/sqlalchemy/alembic/versions/d09a5945e4a0_add_action_description_table.py watcher/decision_engine/__init__.py watcher/decision_engine/gmr.py watcher/decision_engine/manager.py watcher/decision_engine/rpcapi.py watcher/decision_engine/scheduling.py watcher/decision_engine/sync.py watcher/decision_engine/threading.py watcher/decision_engine/audit/__init__.py watcher/decision_engine/audit/base.py watcher/decision_engine/audit/continuous.py watcher/decision_engine/audit/event.py watcher/decision_engine/audit/oneshot.py watcher/decision_engine/datasources/__init__.py watcher/decision_engine/datasources/base.py watcher/decision_engine/datasources/gnocchi.py watcher/decision_engine/datasources/grafana.py watcher/decision_engine/datasources/manager.py watcher/decision_engine/datasources/monasca.py watcher/decision_engine/datasources/prometheus.py watcher/decision_engine/datasources/grafana_translator/__init__.py watcher/decision_engine/datasources/grafana_translator/base.py watcher/decision_engine/datasources/grafana_translator/influxdb.py watcher/decision_engine/goal/__init__.py watcher/decision_engine/goal/base.py watcher/decision_engine/goal/goals.py watcher/decision_engine/goal/efficacy/__init__.py watcher/decision_engine/goal/efficacy/base.py watcher/decision_engine/goal/efficacy/indicators.py watcher/decision_engine/goal/efficacy/specs.py watcher/decision_engine/loading/__init__.py watcher/decision_engine/loading/default.py watcher/decision_engine/messaging/__init__.py watcher/decision_engine/messaging/audit_endpoint.py watcher/decision_engine/messaging/data_model_endpoint.py watcher/decision_engine/model/__init__.py watcher/decision_engine/model/base.py watcher/decision_engine/model/model_root.py watcher/decision_engine/model/collector/__init__.py watcher/decision_engine/model/collector/base.py watcher/decision_engine/model/collector/cinder.py watcher/decision_engine/model/collector/ironic.py watcher/decision_engine/model/collector/manager.py watcher/decision_engine/model/collector/nova.py watcher/decision_engine/model/element/__init__.py watcher/decision_engine/model/element/baremetal_resource.py watcher/decision_engine/model/element/base.py watcher/decision_engine/model/element/compute_resource.py watcher/decision_engine/model/element/instance.py watcher/decision_engine/model/element/node.py 
watcher/decision_engine/model/element/storage_resource.py watcher/decision_engine/model/element/volume.py watcher/decision_engine/model/notification/__init__.py watcher/decision_engine/model/notification/base.py watcher/decision_engine/model/notification/cinder.py watcher/decision_engine/model/notification/filtering.py watcher/decision_engine/model/notification/nova.py watcher/decision_engine/planner/__init__.py watcher/decision_engine/planner/base.py watcher/decision_engine/planner/manager.py watcher/decision_engine/planner/node_resource_consolidation.py watcher/decision_engine/planner/weight.py watcher/decision_engine/planner/workload_stabilization.py watcher/decision_engine/scope/__init__.py watcher/decision_engine/scope/baremetal.py watcher/decision_engine/scope/base.py watcher/decision_engine/scope/compute.py watcher/decision_engine/scope/storage.py watcher/decision_engine/scoring/__init__.py watcher/decision_engine/scoring/base.py watcher/decision_engine/scoring/dummy_scorer.py watcher/decision_engine/scoring/dummy_scoring_container.py watcher/decision_engine/scoring/scoring_factory.py watcher/decision_engine/solution/__init__.py watcher/decision_engine/solution/base.py watcher/decision_engine/solution/default.py watcher/decision_engine/solution/efficacy.py watcher/decision_engine/solution/solution_comparator.py watcher/decision_engine/solution/solution_evaluator.py watcher/decision_engine/strategy/__init__.py watcher/decision_engine/strategy/common/__init__.py watcher/decision_engine/strategy/common/level.py watcher/decision_engine/strategy/context/__init__.py watcher/decision_engine/strategy/context/base.py watcher/decision_engine/strategy/context/default.py watcher/decision_engine/strategy/selection/__init__.py watcher/decision_engine/strategy/selection/base.py watcher/decision_engine/strategy/selection/default.py watcher/decision_engine/strategy/strategies/__init__.py watcher/decision_engine/strategy/strategies/actuation.py watcher/decision_engine/strategy/strategies/base.py watcher/decision_engine/strategy/strategies/basic_consolidation.py watcher/decision_engine/strategy/strategies/dummy_strategy.py watcher/decision_engine/strategy/strategies/dummy_with_resize.py watcher/decision_engine/strategy/strategies/dummy_with_scorer.py watcher/decision_engine/strategy/strategies/host_maintenance.py watcher/decision_engine/strategy/strategies/node_resource_consolidation.py watcher/decision_engine/strategy/strategies/noisy_neighbor.py watcher/decision_engine/strategy/strategies/outlet_temp_control.py watcher/decision_engine/strategy/strategies/saving_energy.py watcher/decision_engine/strategy/strategies/storage_capacity_balance.py watcher/decision_engine/strategy/strategies/uniform_airflow.py watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py watcher/decision_engine/strategy/strategies/workload_balance.py watcher/decision_engine/strategy/strategies/workload_stabilization.py watcher/decision_engine/strategy/strategies/zone_migration.py watcher/hacking/__init__.py watcher/hacking/checks.py watcher/locale/de/LC_MESSAGES/watcher.po watcher/locale/en_GB/LC_MESSAGES/watcher.po watcher/notifications/__init__.py watcher/notifications/action.py watcher/notifications/action_plan.py watcher/notifications/audit.py watcher/notifications/base.py watcher/notifications/exception.py watcher/notifications/goal.py watcher/notifications/service.py watcher/notifications/strategy.py watcher/objects/__init__.py watcher/objects/action.py watcher/objects/action_description.py 
watcher/objects/action_plan.py watcher/objects/audit.py watcher/objects/audit_template.py watcher/objects/base.py watcher/objects/efficacy_indicator.py watcher/objects/fields.py watcher/objects/goal.py watcher/objects/scoring_engine.py watcher/objects/service.py watcher/objects/strategy.py watcher/tests/__init__.py watcher/tests/base.py watcher/tests/conf_fixture.py watcher/tests/config.py watcher/tests/fake_policy.py watcher/tests/fakes.py watcher/tests/policy_fixture.py watcher/tests/test_threading.py watcher/tests/api/__init__.py watcher/tests/api/base.py watcher/tests/api/test_base.py watcher/tests/api/test_config.py watcher/tests/api/test_hooks.py watcher/tests/api/test_root.py watcher/tests/api/test_scheduling.py watcher/tests/api/test_utils.py watcher/tests/api/utils.py watcher/tests/api/v1/__init__.py watcher/tests/api/v1/test_actions.py watcher/tests/api/v1/test_actions_plans.py watcher/tests/api/v1/test_audit_templates.py watcher/tests/api/v1/test_audits.py watcher/tests/api/v1/test_data_model.py watcher/tests/api/v1/test_goals.py watcher/tests/api/v1/test_microversions.py watcher/tests/api/v1/test_root.py watcher/tests/api/v1/test_scoring_engines.py watcher/tests/api/v1/test_services.py watcher/tests/api/v1/test_strategies.py watcher/tests/api/v1/test_types.py watcher/tests/api/v1/test_utils.py watcher/tests/api/v1/test_webhooks.py watcher/tests/applier/__init__.py watcher/tests/applier/test_applier_manager.py watcher/tests/applier/test_rpcapi.py watcher/tests/applier/test_sync.py watcher/tests/applier/action_plan/__init__.py watcher/tests/applier/action_plan/test_default_action_handler.py watcher/tests/applier/actions/__init__.py watcher/tests/applier/actions/test_change_node_power_state.py watcher/tests/applier/actions/test_change_nova_service_state.py watcher/tests/applier/actions/test_migration.py watcher/tests/applier/actions/test_resize.py watcher/tests/applier/actions/test_sleep.py watcher/tests/applier/actions/test_volume_migration.py watcher/tests/applier/actions/loading/__init__.py watcher/tests/applier/actions/loading/test_default_actions_loader.py watcher/tests/applier/messaging/__init__.py watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py watcher/tests/applier/workflow_engine/__init__.py watcher/tests/applier/workflow_engine/test_default_workflow_engine.py watcher/tests/applier/workflow_engine/test_taskflow_action_container.py watcher/tests/applier/workflow_engine/loading/__init__.py watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py watcher/tests/cmd/__init__.py watcher/tests/cmd/test_api.py watcher/tests/cmd/test_applier.py watcher/tests/cmd/test_db_manage.py watcher/tests/cmd/test_decision_engine.py watcher/tests/cmd/test_status.py watcher/tests/common/__init__.py watcher/tests/common/test_cinder_helper.py watcher/tests/common/test_clients.py watcher/tests/common/test_ironic_helper.py watcher/tests/common/test_nova_helper.py watcher/tests/common/test_placement_helper.py watcher/tests/common/test_scheduling.py watcher/tests/common/test_service.py watcher/tests/common/test_utils.py watcher/tests/common/loader/__init__.py watcher/tests/common/loader/test_loader.py watcher/tests/common/metal_helper/__init__.py watcher/tests/common/metal_helper/test_base.py watcher/tests/common/metal_helper/test_factory.py watcher/tests/common/metal_helper/test_ironic.py watcher/tests/common/metal_helper/test_maas.py watcher/tests/conf/__init__.py watcher/tests/conf/test_list_opts.py watcher/tests/db/__init__.py watcher/tests/db/base.py 
watcher/tests/db/test_action.py watcher/tests/db/test_action_description.py watcher/tests/db/test_action_plan.py watcher/tests/db/test_audit.py watcher/tests/db/test_audit_template.py watcher/tests/db/test_efficacy_indicator.py watcher/tests/db/test_goal.py watcher/tests/db/test_purge.py watcher/tests/db/test_scoring_engine.py watcher/tests/db/test_service.py watcher/tests/db/test_strategy.py watcher/tests/db/utils.py watcher/tests/decision_engine/__init__.py watcher/tests/decision_engine/fake_goals.py watcher/tests/decision_engine/fake_metal_helper.py watcher/tests/decision_engine/fake_strategies.py watcher/tests/decision_engine/test_gmr.py watcher/tests/decision_engine/test_rpcapi.py watcher/tests/decision_engine/test_scheduling.py watcher/tests/decision_engine/test_sync.py watcher/tests/decision_engine/audit/__init__.py watcher/tests/decision_engine/audit/test_audit_handlers.py watcher/tests/decision_engine/cluster/__init__.py watcher/tests/decision_engine/cluster/test_cinder_cdmc.py watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py watcher/tests/decision_engine/cluster/test_nova_cdmc.py watcher/tests/decision_engine/datasources/__init__.py watcher/tests/decision_engine/datasources/test_base.py watcher/tests/decision_engine/datasources/test_gnocchi_helper.py watcher/tests/decision_engine/datasources/test_grafana_helper.py watcher/tests/decision_engine/datasources/test_manager.py watcher/tests/decision_engine/datasources/test_monasca_helper.py watcher/tests/decision_engine/datasources/test_prometheus_helper.py watcher/tests/decision_engine/datasources/grafana_translators/__init__.py watcher/tests/decision_engine/datasources/grafana_translators/test_base.py watcher/tests/decision_engine/datasources/grafana_translators/test_influxdb.py watcher/tests/decision_engine/event_consumer/__init__.py watcher/tests/decision_engine/loading/__init__.py watcher/tests/decision_engine/loading/test_collector_loader.py watcher/tests/decision_engine/loading/test_default_planner_loader.py watcher/tests/decision_engine/loading/test_default_strategy_loader.py watcher/tests/decision_engine/loading/test_goal_loader.py watcher/tests/decision_engine/messaging/__init__.py watcher/tests/decision_engine/messaging/test_audit_endpoint.py watcher/tests/decision_engine/messaging/test_data_model_endpoint.py watcher/tests/decision_engine/model/__init__.py watcher/tests/decision_engine/model/faker_cluster_and_metrics.py watcher/tests/decision_engine/model/faker_cluster_state.py watcher/tests/decision_engine/model/gnocchi_metrics.py watcher/tests/decision_engine/model/monasca_metrics.py watcher/tests/decision_engine/model/test_element.py watcher/tests/decision_engine/model/test_model.py watcher/tests/decision_engine/model/data/ironic_scenario_1.xml watcher/tests/decision_engine/model/data/scenario_1.xml watcher/tests/decision_engine/model/data/scenario_10.xml watcher/tests/decision_engine/model/data/scenario_1_with_1_node_unavailable.xml watcher/tests/decision_engine/model/data/scenario_1_with_all_instances_exclude.xml watcher/tests/decision_engine/model/data/scenario_1_with_all_nodes_disable.xml watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml 
watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml watcher/tests/decision_engine/model/data/storage_scenario_1.xml watcher/tests/decision_engine/model/notification/__init__.py watcher/tests/decision_engine/model/notification/fake_managers.py watcher/tests/decision_engine/model/notification/test_cinder_notifications.py watcher/tests/decision_engine/model/notification/test_notifications.py watcher/tests/decision_engine/model/notification/test_nova_notifications.py watcher/tests/decision_engine/model/notification/data/capacity.json watcher/tests/decision_engine/model/notification/data/instance-create-end.json watcher/tests/decision_engine/model/notification/data/instance-delete-end.json watcher/tests/decision_engine/model/notification/data/instance-live_migration_force_complete-end.json watcher/tests/decision_engine/model/notification/data/instance-live_migration_post-end.json watcher/tests/decision_engine/model/notification/data/instance-lock.json watcher/tests/decision_engine/model/notification/data/instance-pause-end.json watcher/tests/decision_engine/model/notification/data/instance-power_off-end.json watcher/tests/decision_engine/model/notification/data/instance-power_on-end.json watcher/tests/decision_engine/model/notification/data/instance-rebuild-end.json watcher/tests/decision_engine/model/notification/data/instance-rescue-end.json watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-end.json watcher/tests/decision_engine/model/notification/data/instance-restore-end.json watcher/tests/decision_engine/model/notification/data/instance-resume-end.json watcher/tests/decision_engine/model/notification/data/instance-shelve-end.json watcher/tests/decision_engine/model/notification/data/instance-shutdown-end.json watcher/tests/decision_engine/model/notification/data/instance-soft_delete-end.json watcher/tests/decision_engine/model/notification/data/instance-suspend-end.json watcher/tests/decision_engine/model/notification/data/instance-unlock.json watcher/tests/decision_engine/model/notification/data/instance-unpause-end.json watcher/tests/decision_engine/model/notification/data/instance-unrescue-end.json watcher/tests/decision_engine/model/notification/data/instance-unshelve-end.json watcher/tests/decision_engine/model/notification/data/instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json 
watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json watcher/tests/decision_engine/model/notification/data/service-create.json watcher/tests/decision_engine/model/notification/data/service-delete.json watcher/tests/decision_engine/model/notification/data/service-update.json watcher/tests/decision_engine/planner/__init__.py watcher/tests/decision_engine/planner/test_node_resource_consolidation.py watcher/tests/decision_engine/planner/test_planner_manager.py watcher/tests/decision_engine/planner/test_weight_planner.py watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py watcher/tests/decision_engine/scope/__init__.py watcher/tests/decision_engine/scope/fake_scopes.py watcher/tests/decision_engine/scope/test_baremetal.py watcher/tests/decision_engine/scope/test_compute.py watcher/tests/decision_engine/scope/test_storage.py watcher/tests/decision_engine/scoring/__init__.py watcher/tests/decision_engine/scoring/test_dummy_scorer.py watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py watcher/tests/decision_engine/scoring/test_scoring_factory.py watcher/tests/decision_engine/solution/__init__.py watcher/tests/decision_engine/solution/test_default_solution.py watcher/tests/decision_engine/strategy/__init__.py watcher/tests/decision_engine/strategy/context/__init__.py watcher/tests/decision_engine/strategy/context/test_strategy_context.py watcher/tests/decision_engine/strategy/selector/__init__.py watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py watcher/tests/decision_engine/strategy/strategies/__init__.py watcher/tests/decision_engine/strategy/strategies/test_actuator.py watcher/tests/decision_engine/strategy/strategies/test_base.py watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py watcher/tests/decision_engine/strategy/strategies/test_host_maintenance.py watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolidation.py watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py watcher/tests/decision_engine/strategy/strategies/test_saving_energy.py watcher/tests/decision_engine/strategy/strategies/test_storage_capacity_balance.py watcher/tests/decision_engine/strategy/strategies/test_strategy_endpoint.py watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py 
watcher/tests/notifications/__init__.py watcher/tests/notifications/test_action_notification.py watcher/tests/notifications/test_action_plan_notification.py watcher/tests/notifications/test_audit_notification.py watcher/tests/notifications/test_notification.py watcher/tests/notifications/test_service_notifications.py watcher/tests/objects/__init__.py watcher/tests/objects/test_action.py watcher/tests/objects/test_action_description.py watcher/tests/objects/test_action_plan.py watcher/tests/objects/test_audit.py watcher/tests/objects/test_audit_template.py watcher/tests/objects/test_efficacy_indicator.py watcher/tests/objects/test_goal.py watcher/tests/objects/test_objects.py watcher/tests/objects/test_scoring_engine.py watcher/tests/objects/test_service.py watcher/tests/objects/test_strategy.py watcher/tests/objects/utils.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/dependency_links.txt0000664000175000017500000000000100000000000025453 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/entry_points.txt0000664000175000017500000001045400000000000024707 0ustar00zuulzuul00000000000000[console_scripts] watcher-api = watcher.cmd.api:main watcher-applier = watcher.cmd.applier:main watcher-db-manage = watcher.cmd.dbmanage:main watcher-decision-engine = watcher.cmd.decisionengine:main watcher-status = watcher.cmd.status:main watcher-sync = watcher.cmd.sync:main [oslo.config.opts] watcher = watcher.conf.opts:list_opts [oslo.policy.enforcer] watcher = watcher.common.policy:get_enforcer [oslo.policy.policies] watcher = watcher.common.policies:list_rules [watcher.database.migration_backend] sqlalchemy = watcher.db.sqlalchemy.migration [watcher_actions] change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState migrate = watcher.applier.actions.migration:Migrate nop = watcher.applier.actions.nop:Nop resize = watcher.applier.actions.resize:Resize sleep = watcher.applier.actions.sleep:Sleep volume_migrate = watcher.applier.actions.volume_migration:VolumeMigrate [watcher_cluster_data_model_collectors] baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector [watcher_goals] airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization cluster_maintaining = watcher.decision_engine.goal.goals:ClusterMaintaining dummy = watcher.decision_engine.goal.goals:Dummy hardware_maintenance = watcher.decision_engine.goal.goals:HardwareMaintenance noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization saving_energy = watcher.decision_engine.goal.goals:SavingEnergy server_consolidation = watcher.decision_engine.goal.goals:ServerConsolidation thermal_optimization = watcher.decision_engine.goal.goals:ThermalOptimization unclassified = watcher.decision_engine.goal.goals:Unclassified workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing [watcher_planners] node_resource_consolidation = watcher.decision_engine.planner.node_resource_consolidation:NodeResourceConsolidationPlanner weight = 
watcher.decision_engine.planner.weight:WeightPlanner workload_stabilization = watcher.decision_engine.planner.workload_stabilization:WorkloadStabilizationPlanner [watcher_scoring_engine_containers] dummy_scoring_container = watcher.decision_engine.scoring.dummy_scoring_container:DummyScoringContainer [watcher_scoring_engines] dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer [watcher_strategies] actuator = watcher.decision_engine.strategy.strategies.actuation:Actuator basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance node_resource_consolidation = watcher.decision_engine.strategy.strategies.node_resource_consolidation:NodeResourceConsolidation noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl saving_energy = watcher.decision_engine.strategy.strategies.saving_energy:SavingEnergy storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance workload_stabilization = watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration [watcher_workflow_engines] taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine [wsgi_scripts] watcher-api-wsgi = watcher.api.wsgi:initialize_wsgi_app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/not-zip-safe0000664000175000017500000000000100000000000023633 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/pbr.json0000664000175000017500000000005700000000000023065 0ustar00zuulzuul00000000000000{"git_version": "f2ee231f", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/requires.txt0000664000175000017500000000171000000000000024004 0ustar00zuulzuul00000000000000apscheduler>=3.5.1 eventlet>=0.27.0 jsonpatch>=1.21 keystoneauth1>=3.4.0 jsonschema>=3.2.0 keystonemiddleware>=4.21.0 lxml>=4.5.1 croniter>=0.3.20 os-resource-classes>=0.4.0 oslo.concurrency>=3.26.0 oslo.cache>=1.29.0 oslo.config>=6.8.0 oslo.context>=2.21.0 oslo.db>=4.44.0 oslo.i18n>=3.20.0 oslo.log>=3.37.0 oslo.messaging>=14.1.0 oslo.policy>=4.5.0 oslo.reports>=1.27.0 oslo.serialization>=2.25.0 oslo.service>=1.30.0 oslo.upgradecheck>=1.3.0 oslo.utils>=7.0.0 oslo.versionedobjects>=1.32.0 PasteDeploy>=1.5.2 pbr>=3.1.1 pecan>=1.3.2 PrettyTable>=0.7.2 gnocchiclient>=7.0.1 
python-cinderclient>=3.5.0 python-glanceclient>=2.9.1 python-keystoneclient>=3.15.0 python-monascaclient>=1.12.0 python-neutronclient>=6.7.0 python-novaclient>=14.1.0 python-observabilityclient>=0.3.0 python-openstackclient>=3.14.0 python-ironicclient>=2.5.0 SQLAlchemy>=1.2.5 stevedore>=1.28.0 taskflow>=3.8.0 WebOb>=1.8.5 WSME>=0.9.2 networkx>=2.4 microversion_parse>=0.2.1 futurist>=1.8.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591600.0 python_watcher-14.0.0/python_watcher.egg-info/top_level.txt0000664000175000017500000000001000000000000024136 0ustar00zuulzuul00000000000000watcher ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6191351 python_watcher-14.0.0/rally-jobs/0000775000175000017500000000000000000000000016733 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/rally-jobs/README.rst0000664000175000017500000000261500000000000020426 0ustar00zuulzuul00000000000000Rally job ========= With Watcher, we provide a Rally plugin you can use to benchmark the optimization service. To launch this task with a configured Rally, run: :: rally task start watcher/rally-jobs/watcher-watcher.yaml Structure --------- * plugins - directory where you can add Rally plugins. Almost everything in Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks, generic cleanup resources, and so on. * extra - all files from this directory will be copied to the gates, so you are able to use absolute paths in Rally tasks. The files will be located in ~/.rally/extra/* * watcher-watcher.yaml is the task that is run in the gates against OpenStack deployed by DevStack Useful links ------------ * How to install: https://docs.openstack.org/rally/latest/install_and_upgrade/install.html * How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html * More about Rally: https://docs.openstack.org/rally/latest/ * Rally project info and release notes: https://docs.openstack.org/rally/latest/project_info/index.html * How to add rally-gates: https://docs.openstack.org/rally/latest/quick_start/gates.html#gate-jobs * About plugins: https://docs.openstack.org/rally/latest/plugins/index.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/rally-jobs/watcher-watcher.yaml0000664000175000017500000000253200000000000022711 0ustar00zuulzuul00000000000000--- Watcher.create_audit_and_delete: - runner: type: "constant" times: 10 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 audit_templates: audit_templates_per_admin: 5 fill_strategy: "round_robin" params: - goal: name: "dummy" strategy: name: "dummy" sla: failure_rate: max: 0 Watcher.create_audit_template_and_delete: - args: goal: name: "dummy" strategy: name: "dummy" runner: type: "constant" times: 10 concurrency: 2 sla: failure_rate: max: 0 Watcher.list_audit_templates: - runner: type: "constant" times: 10 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 audit_templates: audit_templates_per_admin: 5 fill_strategy: "random" params: - goal: name: "workload_balancing" strategy: name: "workload_stabilization" - goal: name: "dummy" strategy: name: "dummy" sla: failure_rate: max: 0
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.5991352 python_watcher-14.0.0/releasenotes/0000775000175000017500000000000000000000000017346 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/releasenotes/notes/0000775000175000017500000000000000000000000020476 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000022747 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/2025.1-prelude-8be97eece4e1d1ff.yaml0000664000175000017500000000372600000000000026310 0ustar00zuulzuul00000000000000--- prelude: | The ``OpenStack 2025.1`` release (``Watcher 14.0.0``) includes several new features, deprecations, and removals. After a period of inactivity, the Watcher project moved to the Distributed leadership model in ``2025.1``, with several new contributors working to modernize the code base. Activity this cycle was mainly focused on paying down technical debt related to supporting newer testing runtimes. With this release, ``Ubuntu 24.04`` is now officially tested and supported. ``Ubuntu 24.04`` brings a new default Python runtime, ``3.12``, and with it improvements to eventlet and SQLAlchemy 2.0 compatibility where required. ``2025.1`` is the last release to officially support and test with ``Ubuntu 22.04``. ``2025.1`` is also the second official skip-level upgrade release, supporting upgrades from either ``2024.1`` or ``2024.2``. Another area of focus in this cycle was the data sources supported by Watcher. The long obsolete `Ceilometer` API data source has been removed, the untested `Monasca` data source has been deprecated, and a new `Prometheus` data source has been added. https://specs.openstack.org/openstack/watcher-specs/specs/2025.1/approved/prometheus-datasource.html fixes: - https://bugs.launchpad.net/watcher/+bug/2086710 Watcher compatibility between eventlet, apscheduler, and Python 3.12 - https://bugs.launchpad.net/watcher/+bug/2067815 refactoring of the SQLAlchemy database layer to improve compatibility with eventlet on newer Pythons - A number of linting issues were addressed with the introduction of pre-commit. The issues include, but are not limited to, spelling and grammar fixes across all documentation and code, numerous Sphinx documentation build warnings, and incorrect file permissions such as files having the execute bit set when not required. While none of these changes should affect the runtime behavior of Watcher, they generally improve the maintainability and quality of the codebase.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml0000664000175000017500000000007700000000000026746 0ustar00zuulzuul00000000000000--- features: - | Adds the ability to cancel an action plan.
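As an illustration only, cancelling an ongoing action plan from the command line might look as follows; the exact subcommand is an assumption based on the ``openstack optimize`` command family provided by python-watcherclient, and the UUID is a placeholder: ::

    openstack optimize actionplan cancel <action-plan-uuid>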
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=python_watcher-14.0.0/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.ya0000664000175000017500000000010300000000000033274 0ustar00zuulzuul00000000000000--- features: - Add notifications related to the Action Plan object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/action-versioned-notifications-api-ff94fc0f401292d0.yaml0000664000175000017500000000007600000000000032467 0ustar00zuulzuul00000000000000--- features: - Add notifications related to the Action object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-baremetal-scoper-9ef23f5fb8f0be6a.yaml0000664000175000017500000000013500000000000030001 0ustar00zuulzuul00000000000000--- features: - The baremetal model gets an audit scoper with the ability to exclude Ironic nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-force-field-to-audit-4bcaeedfe27233ad.yaml0000664000175000017500000000042700000000000030524 0ustar00zuulzuul00000000000000--- features: - | Add a force field to Audit. The user can set --force to enable the new option when launching an audit. If force is True, the audit will be executed despite an ongoing action plan. Note that the new audit may create a wrong action plan if both use the same data model. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-ha-support-b9042255e5b76e42.yaml0000664000175000017500000000050200000000000026265 0ustar00zuulzuul00000000000000--- features: - Watcher services can be launched in HA mode. From now on, the Watcher Decision Engine and Watcher Applier services may be deployed on different nodes to run in active-active or active-passive mode. Any ONGOING Audits or Action Plans will be CANCELLED if the service they are executed on is restarted. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=python_watcher-14.0.0/releasenotes/notes/add-instance-metrics-to-prometheus-datasource-9fba8c174ff845e1.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-instance-metrics-to-prometheus-datasource-9fba8c174ff840000664000175000017500000000035000000000000033354 0ustar00zuulzuul00000000000000--- features: - | Support for instance metrics has been added to the Prometheus data source. The included metrics are `instance_cpu_usage`, `instance_ram_usage`, `instance_ram_allocated`, and `instance_root_disk_size`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-name-for-audit-0df1f39f00736f06.yaml0000664000175000017500000000020100000000000027046 0ustar00zuulzuul00000000000000--- features: - Audits now have a 'name' field, which is more friendly to end users. An audit's name can't exceed 63 characters.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml0000664000175000017500000000042200000000000030100 0ustar00zuulzuul00000000000000--- features: - Added a standard way to both declare and fetch configuration options so that whenever the administrator generates the Watcher configuration sample file, it contains the configuration options of the plugins that are currently available. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml0000664000175000017500000000007300000000000026522 0ustar00zuulzuul00000000000000--- features: - Add an action for compute node power on/off. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml0000664000175000017500000000041100000000000027306 0ustar00zuulzuul00000000000000--- features: - Added a generic scoring engine module, which standardizes interactions with scoring engines through a common API. A scoring engine can be used by different strategies, which improves code and data model reuse. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=python_watcher-14.0.0/releasenotes/notes/add-start-end-time-for-continuous-audit-52c45052cb06d153.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-start-end-time-for-continuous-audit-52c45052cb06d153.ya0000664000175000017500000000023400000000000032546 0ustar00zuulzuul00000000000000--- features: - | Add start_time and end_time fields to the audits table. The user can set the start time and/or end time when creating a CONTINUOUS audit. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/add-upgrade-check-framework-5bb9693c8a78931c.yaml0000664000175000017500000000073000000000000030761 0ustar00zuulzuul00000000000000--- prelude: > Added the new tool ``watcher-status upgrade check``. features: - | A new framework for the ``watcher-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Watcher upgrade to ensure the upgrade can be performed safely. upgrade: - | Operators can now use the new CLI tool ``watcher-status upgrade check`` to check if a Watcher deployment can be safely upgraded from the N-1 to the N release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/api-call-retry-fef741ac684c58dd.yaml0000664000175000017500000000064000000000000026577 0ustar00zuulzuul00000000000000--- features: - | API calls made while building the Compute data model will be retried upon failure. The number of failures allowed before giving up and the time to wait before reattempting are configurable. The `api_call_retries` and `api_query_timeout` parameters in the `[collector]` group can be used to adjust this behavior, as sketched below. The default is 10 retries with 1 second between reattempts.
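Restated as a minimal watcher.conf sketch, using the option names and defaults given in the note above: ::

    [collector]
    # Number of times an API call is retried while building the compute data model.
    api_call_retries = 10
    # Seconds to wait before reattempting a failed API call.
    api_query_timeout = 1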
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/api-microversioning-7999a3ee8073bf32.yaml0000664000175000017500000000066000000000000027524 0ustar00zuulzuul00000000000000--- features: Watcher supports API microversions as of the Stein cycle. From now on, all API changes should be made while preserving backward compatibility. To specify an API version, the operator should use the OpenStack-API-Version HTTP header. To discover the minimum and maximum versions supported by the API, the operator can access the /v1 resource and the Watcher API will return the appropriate headers in the response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/audit-scoper-for-storage-data-model-cdccc803542d22db.yaml0000664000175000017500000000024500000000000032556 0ustar00zuulzuul00000000000000--- features: - | Adds an audit scoper for the storage data model. Watcher users can now specify an audit scope for the storage CDM in the same manner as the compute scope. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/audit-tag-vm-metadata-47a3e4468748853c.yaml0000664000175000017500000000040600000000000027454 0ustar00zuulzuul00000000000000--- features: - Added the functionality to filter out instances which have the metadata field 'optimize' set to False. For now, this is only available for the basic_consolidation strategy (if the "check_optimize_metadata" configuration option is enabled). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml0000664000175000017500000000007500000000000032405 0ustar00zuulzuul00000000000000--- features: - Add notifications related to the Audit object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml0000664000175000017500000000031500000000000030744 0ustar00zuulzuul00000000000000--- features: - Watcher can continuously optimize the OpenStack cloud for a specific strategy or goal by periodically triggering an audit, which generates an action plan and runs it automatically. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/background-jobs-ha-9d3cf3fe356f4705.yaml0000664000175000017500000000022400000000000027250 0ustar00zuulzuul00000000000000--- features: - Added a binding between apscheduler jobs and the Watcher decision engine service. It will allow providing HA support in the future. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/bp-audit-scope-exclude-project-511a7720aac00dff.yaml0000664000175000017500000000033500000000000031544 0ustar00zuulzuul00000000000000--- features: - | A feature to exclude instances from the audit scope based on project_id is added. Instances from a particular OpenStack project can now be excluded from an audit by defining the scope in audit templates, as sketched below.
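As a purely illustrative sketch of such an audit template scope (the exact JSON schema is defined by Watcher's audit template documentation; the field names below are assumptions, and PROJECT_ID is a placeholder): ::

    "scope": [
        {"compute": [
            {"exclude": [
                {"projects": [{"uuid": "PROJECT_ID"}]}
            ]}
        ]}
    ]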
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/build-baremetal-data-model-in-watcher-3023453a47b61dab.yaml0000664000175000017500000000007500000000000032576 0ustar00zuulzuul00000000000000--- features: - | Adds a baremetal data model to Watcher. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/cdm-scoping-8d9c307bad46bfa1.yaml0000664000175000017500000000034700000000000026143 0ustar00zuulzuul00000000000000--- features: - | Each CDM collector can now have its own CDM scoper. This changes the scope JSON schema definition for the audit template POST data. Please see the audit template create help message in python-watcherclient. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml0000664000175000017500000000010400000000000030172 0ustar00zuulzuul00000000000000--- features: - Centralize all configuration options for Watcher. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/change-ram-util-metric-4a3e6984b9dd968d.yaml0000664000175000017500000000043400000000000030064 0ustar00zuulzuul00000000000000--- features: - Enhancement of the vm_workload_consolidation strategy by using the 'memory.resident' metric in place of 'memory.usage', as memory.usage shows the memory usage inside the guest OS while memory.resident represents the volume of RAM used by the instance on the host machine. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/check-strategy-requirements-66f9e9262412f8ec.yaml0000664000175000017500000000041000000000000031170 0ustar00zuulzuul00000000000000--- features: - Added a way to check the state of a strategy before an audit's execution. The administrator can use the "watcher strategy state " command to get information about metric availability, datasource availability and CDM availability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml0000664000175000017500000000007000000000000030575 0ustar00zuulzuul00000000000000--- features: - | Added the Cinder cluster data model. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml0000664000175000017500000000026700000000000031503 0ustar00zuulzuul00000000000000--- features: - Added an in-memory cache of the cluster model that is built up and kept fresh via notifications from the services of interest, in addition to periodic syncing logic. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/compute-cdm-include-all-instances-f7506ded2d57732f.yaml0000664000175000017500000000027600000000000032206 0ustar00zuulzuul00000000000000--- features: - Watcher keeps the whole scope of the cluster when building the compute CDM, which includes all instances. Excluded instances are filtered out from migration during the audit.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml0000664000175000017500000000016600000000000032546 0ustar00zuulzuul00000000000000--- features: - Added a way to add a new action without having to amend the source code of the default planner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/consume-nova-versioned-notifications-f98361b37e546b4d.yaml0000664000175000017500000000160200000000000033006 0ustar00zuulzuul00000000000000--- features: - | Watcher consumes Nova notifications to update its internal Compute CDM (Cluster Data Model). The consumed notifications are listed below. pre-existing: * service.update * instance.update * instance.delete.end new: * instance.lock * instance.unlock * instance.pause.end * instance.power_off.end * instance.power_on.end * instance.resize_confirm.end * instance.restore.end * instance.resume.end * instance.shelve.end * instance.shutdown.end * instance.suspend.end * instance.unpause.end * instance.unrescue.end * instance.unshelve.end * instance.rebuild.end * instance.rescue.end * instance.create.end * instance.live_migration_force_complete.end * instance.live_migration_post_dest.end * instance.soft_delete.end * service.create * service.delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml0000664000175000017500000000017100000000000031071 0ustar00zuulzuul00000000000000--- features: - Added a way to create a periodic audit to be able to continuously optimize the cloud infrastructure. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/cron-based-continuous-audits-c3eedf28d9752b37.yaml0000664000175000017500000000055300000000000031405 0ustar00zuulzuul00000000000000--- features: - There is a new ability to create Watcher continuous audits with a cron interval. It means you may use, for example, the optional argument '--interval "\*/5 \* \* \* \*"' to launch an audit every 5 minutes. These jobs are executed on a best-effort basis and therefore we recommend using a minimum cron interval of at least one minute. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/datasource-query-retry-00cba5f7e68aec39.yaml0000664000175000017500000000137400000000000030404 0ustar00zuulzuul00000000000000--- features: - | All datasources can now be configured to retry retrieving a metric upon encountering an error. A configurable amount of time elapses between attempts. These configuration options can be found in the `[watcher_datasources]` group and are named `query_max_retries` and `query_timeout`; see the sketch below. upgrade: - | If Gnocchi was configured to have a custom number of retries and/or a custom timeout, then the configuration needs to be moved into the `[watcher_datasources]` group instead of the `[gnocchi_client]` group. deprecations: - | The configuration options for query retries in `[gnocchi_client]` are deprecated and the options in `[watcher_datasources]` should now be used.
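A minimal watcher.conf sketch of these options; the option names come from the note above, while the values are illustrative rather than documented defaults: ::

    [watcher_datasources]
    # How many times a failed metric query is retried.
    query_max_retries = 10
    # Seconds to wait between retries.
    query_timeout = 1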
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml0000664000175000017500000000011200000000000026276 0ustar00zuulzuul00000000000000--- features: - The Watcher database can now be upgraded thanks to Alembic. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml0000664000175000017500000000033400000000000030167 0ustar00zuulzuul00000000000000--- features: - Provides a generic way to define the scope of an audit. The set of audited resources will be called the "Audit scope" and will be defined in each audit template (which contains the audit settings). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/deprecate-ceilometer-datasource-446b0be70fbce28b.yaml0000664000175000017500000000046100000000000032144 0ustar00zuulzuul00000000000000--- deprecations: - | The Ceilometer datasource has been deprecated since its API was deprecated in the Ocata cycle. Watcher has supported Ceilometer for some releases after Ocata to let users migrate to the Gnocchi/Monasca datasources. As of the Train release, Ceilometer support will be removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-3a92379e9f5dd203.yaml0000664000175000017500000000176000000000000032633 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of the ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default) should generate new policy files or convert them to YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON policy file to YAML in a backward compatible way, as shown below. deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such, operators will need to convert to YAML policy files. Please see the upgrade notes for details on the migration of any custom policy files.
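For example, a conversion run could look like this (the file paths are illustrative): ::

    oslopolicy-convert-json-to-yaml --namespace watcher \
        --policy-file /etc/watcher/policy.json \
        --output-file /etc/watcher/policy.yaml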
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/deprecate-monasca-ds-9065f4d4bee09ab2.yaml0000664000175000017500000000021300000000000027632 0ustar00zuulzuul00000000000000--- deprecations: - | The Monasca datasource is deprecated and will be removed in the future due to the inactivity of the Monasca project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/drop-py-2-7-54f8e806d71f19a7.yaml0000664000175000017500000000030700000000000025432 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. The last release of Watcher to support py2.7 is OpenStack Train. The minimum version of Python now supported by Watcher is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/drop-python38-support-eeb19a0bc0160sw1.yaml0000664000175000017500000000027000000000000030124 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.8 support has been dropped. The last release of Watcher supporting Python 3.8 is 13.0.0. The minimum version of Python now supported is Python 3.9. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/dynamic-action-description-0e947b9e7ef2a134.yaml0000664000175000017500000000017300000000000031026 0ustar00zuulzuul00000000000000--- features: - Add a description property for dynamic actions. Admins can see detailed information about any specified action. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml0000664000175000017500000000016000000000000027323 0ustar00zuulzuul00000000000000--- features: - Added a way to compare the efficacy of different strategies for a given optimization goal. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/enhance-watcher-applier-engine-86c676ce8f179e68.yaml0000664000175000017500000000147000000000000031510 0ustar00zuulzuul00000000000000--- features: - | Added a new config option 'action_execution_rule' which is a dict type. Its key is a strategy name and the value is 'ALWAYS' or 'ANY'. 'ALWAYS' means the callback function returns True as usual. 'ANY' means the return value depends on the result of the previous action execution. The callback returns True if the previous action failed, and the engine continues to run the next action. If the previous action succeeded, the callback returns False and the next action is skipped. For strategies that aren't in 'action_execution_rule', the callback always returns True. Please add the following section to the watcher.conf file if your strategy needs this feature. [watcher_workflow_engines.taskflow] action_execution_rule = {'your strategy name': 'ANY'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/event-driven-optimization-based-4870f112bef8a560.yaml0000664000175000017500000000047600000000000031732 0ustar00zuulzuul00000000000000--- features: - | Add a new webhook API and a new audit type, EVENT; the microversion is 1.4. A Watcher user can now create an audit of the EVENT type, and the audit will be triggered by the webhook API. The user guide is available online: https://docs.openstack.org/watcher/latest/user/event_type_audit.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/file-based-metric-map-c2af62b5067895df.yaml0000664000175000017500000000110000000000000027630 0ustar00zuulzuul00000000000000--- features: - | Allow using a file to override the metric map. The metric map of each datasource is overridden as soon as the datasource is created by the manager. This override comes from a file whose path is provided by a setting in the config file. The setting is `watcher_decision_engine/metric_map_path`. The file contains a map per datasource whose keys are the metric names as recognized by Watcher and whose values are the real names of the metrics in the datasource. This setting defaults to `/etc/watcher/metric_map.yaml`, and the presence of this file is optional. A sketch of such a file follows.
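A sketch of what such a file could look like, following the per-datasource mapping described above; the Watcher-side names reuse metric names mentioned elsewhere in these notes, while the datasource-side names are illustrative assumptions: ::

    gnocchi:
        instance_cpu_usage: cpu_util
    prometheus:
        instance_ram_usage: vm_memory_resident_bytes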
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/releasenotes/notes/formal-datasource-interface-implementation-222769d55a127d33.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/formal-datasource-interface-implementation-222769d55a127d330000664000175000017500000000141600000000000033010 0ustar00zuulzuul00000000000000--- features: - | Improved interface for the datasource baseclass that better defines the expected values and types for the parameters and return types of all abstract methods. This allows all strategies to work with every datasource, provided the metrics are configured for that given datasource. deprecations: - | The new strategy baseclass has significant changes in method parameters and any out-of-tree strategies will have to be adapted. - | Several strategies have changed the `node` parameter to `compute_node` to be better aligned with terminology. These strategies include `basic_consolidation` and `workload_stabilization`. The `node` parameter will remain supported during the Train release and will be removed in the subsequent release. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/releasenotes/notes/general-purpose-decision-engine-threadpool-0711b23abfc9d409.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/general-purpose-decision-engine-threadpool-0711b23abfc9d4090000664000175000017500000000177700000000000033145 0ustar00zuulzuul00000000000000--- prelude: > Many operations in the decision engine will block on I/O. Such I/O operations can stall the execution of a sequential application significantly. To reduce this potential bottleneck, the general purpose decision engine threadpool is introduced. features: - | A new threadpool for the decision engine that contributors can use to improve the performance of many operations, primarily I/O bound ones. The number of workers used by the decision engine threadpool can be configured to scale according to the available infrastructure using the `watcher_decision_engine.max_general_workers` config option. Documentation for contributors to effectively use this threadpool is available online: https://docs.openstack.org/watcher/latest/contributor/concurrency.html - | The building of the compute (Nova) data model will be done using the decision engine threadpool, thereby significantly reducing the total time required to build it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml0000664000175000017500000000025200000000000030015 0ustar00zuulzuul00000000000000--- features: - Added a way to return the list of available goals depending on which strategies have been deployed on the node where the decision engine is running.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/global-datasource-preference-3ab47b4be09ff3a5.yaml0000664000175000017500000000060700000000000031442 0ustar00zuulzuul00000000000000--- features: - | Watcher now supports configuring which datasources to use and in which order. This configuration is done by specifying datasources in the watcher_datasources section: - ``[watcher_datasources] datasources = gnocchi,monasca,ceilometer`` Specific strategies can override this order and use datasources which are not listed in the global preference. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/gnocchi-watcher-43c25d391fbd3e9c.yaml0000664000175000017500000000022200000000000026722 0ustar00zuulzuul00000000000000--- features: - Added Gnocchi support as a data source for metrics. The administrator can change the data source for each strategy using the config file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/grafana-datasource-b672367c23ffa0c6.yaml0000664000175000017500000000104300000000000027323 0ustar00zuulzuul00000000000000--- features: - | Grafana has been added as a datasource that can be used for collecting metrics. The configuration options allow specifying which metrics are collected and how they are stored in Grafana, so that no matter how Grafana is configured it can still be used. The configuration can be done via the typical configuration file, but it is recommended to configure most options in the YAML file for metrics. For a complete walkthrough on configuring Grafana see: https://docs.openstack.org/watcher/latest/datasources/grafana.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml0000664000175000017500000000041200000000000030325 0ustar00zuulzuul00000000000000--- features: - The graph model describes how VMs are associated with compute hosts. This allows seeing the relationships between the entities upfront, and hence can be used to identify hot/cold spots in the data center and influence a strategy decision. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/host-maintenance-strategy-41f640927948fb56.yaml0000664000175000017500000000055500000000000030500 0ustar00zuulzuul00000000000000--- features: - | Added a strategy for single compute node maintenance without interrupting the user's applications. If given a backup node, the strategy will first migrate all instances from the maintenance node to the backup node. If a backup node is not provided, it will migrate all instances, relying on the nova-scheduler. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/improve-compute-data-model-b427c85e4ed2b6fb.yaml0000664000175000017500000000167600000000000031104 0ustar00zuulzuul00000000000000--- features: - | Watcher can get resource information such as totals, allocation ratios and reserved amounts from the Placement API. We now add some new fields to the Watcher data model: * vcpu_reserved: The amount of CPU a node has reserved for its own use. * vcpu_ratio: CPU allocation ratio.
* memory_mb_reserved: The amount of memory a node has reserved for its own use. * memory_ratio: Memory allocation ratio. * disk_gb_reserved: The amount of disk a node has reserved for its own use. * disk_ratio: Disk allocation ratio. We also add some new properties: * vcpu_capacity: The amount of vCPU, taking the allocation ratio into account but not including reserved. * memory_mb_capacity: The amount of memory, taking the allocation ratio into account but not including reserved. * disk_gb_capacity: The amount of disk, taking the allocation ratio into account but not including reserved. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/jsonschema-validation-79cab05d5295da00.yaml0000664000175000017500000000012700000000000030052 0ustar00zuulzuul00000000000000--- features: - Added the use of JSONSchema instead of voluptuous to validate Actions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/min-required-nova-train-71f124192d88ae52.yaml0000664000175000017500000000042000000000000030112 0ustar00zuulzuul00000000000000--- upgrade: - | The minimum required version of the ``[nova_client]/api_version`` value is now enforced to be ``2.56``, which is available since the Queens version of the Nova compute service. A ``watcher-status upgrade check`` has been added for this. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml0000664000175000017500000000014600000000000026650 0ustar00zuulzuul00000000000000--- features: - Watcher supports multiple metric backends and relies on Ceilometer and Monasca. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/multiple-global-efficacy-indicator-fc11c4844a12a7d5.yaml0000664000175000017500000000041300000000000032376 0ustar00zuulzuul00000000000000--- features: - Watcher gained the ability to calculate multiple global efficacy indicators during an audit's execution. Global efficacy can now be calculated for many resource types (like volumes, instances, networks) if the strategy supports efficacy indicators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/node-resource-consolidation-73bc0c0abfeb0b03.yaml0000664000175000017500000000036500000000000031407 0ustar00zuulzuul00000000000000--- features: - | Added the "node resource consolidation" strategy. This strategy is used to centralize VMs onto as few nodes as possible by VM migration. The user can set an input parameter to decide how to select the destination node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/noisy-neighbor-strategy-a71342740b59dddc.yaml0000664000175000017500000000030400000000000030364 0ustar00zuulzuul00000000000000--- features: - Added a strategy to identify and migrate a noisy neighbor - a low priority VM that negatively affects the performance of a high priority VM by over-utilizing the Last Level Cache.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/notifications-actionplan-cancel-edb2a4a12543e2d0.yaml0000664000175000017500000000023300000000000032054 0ustar00zuulzuul00000000000000--- features: - Added notifications about the cancelling of an action plan. Now event-based plugins know when an action plan cancel has started and completed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml0000664000175000017500000000026600000000000030302 0ustar00zuulzuul00000000000000--- features: - Allow the decision engine to pass strategy parameters, like an optimization threshold, to the selected strategy, and allow the strategy to provide parameter info to the end user. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml0000664000175000017500000000017700000000000031247 0ustar00zuulzuul00000000000000--- features: - Copy all audit template parameters into the audit instead of having a reference to the audit template. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml0000664000175000017500000000021600000000000030772 0ustar00zuulzuul00000000000000--- features: - Watcher can now run specific actions in parallel, improving performance dramatically when executing an action plan. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/prometheus-datasource-e56f2f7b8f3427c2.yaml0000664000175000017500000000052300000000000030134 0ustar00zuulzuul00000000000000--- features: - | A new Prometheus data source is added. This allows the Watcher decision engine to collect metrics from a Prometheus server. For more information about the Prometheus data source, including limitations and configuration options, see https://docs.openstack.org/watcher/latest/datasources/prometheus.html A configuration sketch follows.
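As a hedged sketch only - the section and option names below are assumptions about the Prometheus client configuration, and the documentation linked above is authoritative; the host and port values are illustrative: ::

    [prometheus_client]
    host = prometheus.example.org
    port = 9090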
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/remove-ceilometer-datasource-8d9ab7d64d61e405.yaml0000664000175000017500000000031700000000000031361 0ustar00zuulzuul00000000000000--- upgrade: - | The Ceilometer datasource has been completely removed. The datasource requires the Ceilometer API, which has already been removed from Ceilometer. Use the other datasources, such as Gnocchi. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/remove-nova-legacy-notifications-e1b6d10eff58f30a.yaml0000664000175000017500000000017600000000000032305 0ustar00zuulzuul00000000000000--- deprecations: - | Watcher removes support for Nova legacy notifications because Nova will deprecate them. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=python_watcher-14.0.0/releasenotes/notes/replace-cold-migrate-to-use-nova-migration-api-cecd9a39ddd3bc58.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/replace-cold-migrate-to-use-nova-migration-api-cecd9a39ddd30000664000175000017500000000100700000000000033351 0ustar00zuulzuul00000000000000--- features: - | Instance cold migration logic is now replaced with the Nova migrate server (migrate action) API, which has a host option since v2.56. upgrade: - | The Nova API version is now set to 2.56 by default. This is needed for the migrate action of migration type cold with the destination_node parameter to work. fixes: - | The migrate action of migration type cold with the destination_node parameter was fixed. Before the fix, it booted an instance in the service project as the migrated instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/scope-for-data-model-ea9792f90db14343.yaml0000664000175000017500000000033500000000000027427 0ustar00zuulzuul00000000000000--- features: - | For a large cloud infrastructure, retrieving data from Nova may take a long time. To avoid getting too much data from Nova, the compute data model is built according to the scope of the audit. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/service-versioned-notifications-api-70367b79a565d900.yaml0000664000175000017500000000007700000000000032444 0ustar00zuulzuul00000000000000--- features: - Add notifications related to the Service object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/show-datamodel-api-6945b744fd5d25d5.yaml0000664000175000017500000000110200000000000027204 0ustar00zuulzuul00000000000000--- features: - | Add a show data model API for Watcher. New in version 1.3. The user can use the 'openstack optimize datamodel list' command to view the current data model information in memory. The user can also add '--audit ' to view a specific data model in memory filtered by the scope in the audit. The user can also add '--detail' to view detailed information about the current data model. The user can also add '--type ' to specify the type of data model. The default type is 'compute'. In the future, the types 'storage' and 'baremetal' will be supported.
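Combining the flags described above, a typical invocation could look like this usage sketch: ::

    openstack optimize datamodel list --type compute --detail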
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/storage-workload-balance-0ecabbc1791e6894.yaml0000664000175000017500000000010100000000000030520 0ustar00zuulzuul00000000000000--- features: - | Added storage capacity balance strategy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/support-keystoneclient-option-b30d1ff45f86a2e7.yaml0000664000175000017500000000025000000000000031736 0ustar00zuulzuul00000000000000--- features: - | Add a keystone_client group so users can configure 'interface' and 'region_name' in watcher.conf. The default value of 'interface' is 'admin'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/support-placement-api-58ce6bef1bbbe98a.yaml0000664000175000017500000000057400000000000030344 0ustar00zuulzuul00000000000000--- features: - | Added a Placement API helper to Watcher. Now Watcher can get information about resource providers, which can be used for the data model and strategies. A config group placement_client with options 'api_version', 'interface' and 'region_name' is also added. The default values for 'api_version' and 'interface' are 1.29 and 'public', respectively; a configuration sketch follows these notes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml0000664000175000017500000000006400000000000027771 0ustar00zuulzuul00000000000000--- features: - | Added SUSPENDED audit state ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml0000664000175000017500000000023200000000000030555 0ustar00zuulzuul00000000000000--- features: - Added a new strategy based on the airflow of servers. This strategy makes decisions to migrate VMs to make the airflow uniform. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/uwsgi-support-8dcea6961e56dad0.yaml0000664000175000017500000000065300000000000026617 0ustar00zuulzuul00000000000000--- upgrade: - | A Watcher API WSGI application script ``watcher-api-wsgi`` is now available. It is auto-generated by ``pbr`` and allows running the API service under a WSGI server (for example Nginx or uWSGI). deprecations: - | Using the ``watcher/api/app.wsgi`` script is deprecated and it will be removed in the U release. Please switch to the automatically generated ``watcher-api-wsgi`` script instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/volume-migrate-action-fc57b0ce0e4c39ae.yaml0000664000175000017500000000006400000000000030224 0ustar00zuulzuul00000000000000--- features: - | Added volume migrate action ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml0000664000175000017500000000073000000000000030675 0ustar00zuulzuul00000000000000--- features: - Provide a notification mechanism into Watcher that supports versioning.
Whenever a Watcher object is created, updated or deleted, a versioned notification is, if relevant, emitted automatically, enabling an event-driven style of architecture within Watcher. It also gives other services and/or 3rd-party software (e.g. monitoring solutions or rules engines) the ability to react to such events. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/watcher-planner-selector-84d77549d46f362a.yaml0000664000175000017500000000023100000000000030363 0ustar00zuulzuul00000000000000--- features: - A Watcher strategy can now select a specific planner beyond the default one by setting its planner property. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml0000664000175000017500000000012200000000000027102 0ustar00zuulzuul00000000000000--- features: - Added policies to handle user rights to access the Watcher API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml0000664000175000017500000000010500000000000027657 0ustar00zuulzuul00000000000000--- features: - Add a service supervisor to watch Watcher daemons. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml0000664000175000017500000000026700000000000031024 0ustar00zuulzuul00000000000000--- features: - All Watcher objects have been refactored to support OVO (oslo.versionedobjects), a prerequisite step for implementing versioned notifications. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=python_watcher-14.0.0/releasenotes/notes/workload-balance-base-on-cpu-or-ram-util-3ff4ee968c32b2ed.yaml 22 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/workload-balance-base-on-cpu-or-ram-util-3ff4ee968c32b2ed.y0000664000175000017500000000036100000000000032740 0ustar00zuulzuul00000000000000--- features: - The existing workload_balance strategy was based only on VM CPU workload. This feature improves the strategy; via the input parameter "metrics", it decides whether to migrate a VM based on CPU or memory utilization.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml0000664000175000017500000000041100000000000032452 0ustar00zuulzuul00000000000000--- features: - Added a strategy based on the VM workloads of hypervisors. This strategy makes decisions to migrate workloads so that the total VM workload of each hypervisor is balanced, whenever the total VM workload of a hypervisor reaches a threshold. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/notes/zone-migration-strategy-10f7656a2a01e607.yaml0000664000175000017500000000031000000000000030221 0ustar00zuulzuul00000000000000--- features: - | Added the strategy "Zone migration" and its goal "Hardware maintenance". The strategy automatically migrates many instances and volumes efficiently with minimum downtime.
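The keystone_client and placement_client groups described in the notes above live in watcher.conf. A minimal sketch using the defaults stated in those notes is shown below; the region name is an illustrative value::

    [keystone_client]
    # 'admin' is the stated default interface for this group
    interface = admin
    region_name = RegionOne

    [placement_client]
    # Stated defaults: api_version 1.29, interface 'public'
    api_version = 1.29
    interface = public
    region_name = RegionOne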
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/releasenotes/source/0000775000175000017500000000000000000000000020646 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000021000000000000022116 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000022120 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000022120 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/2024.2.rst0000664000175000017500000000020200000000000022121 0ustar00zuulzuul00000000000000=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000022274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000024545 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/conf.py0000664000175000017500000002042700000000000022152 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # watcher documentation build configuration file, created by # sphinx-quickstart on Fri Jun 3 11:37:52 2016. # # This file is execfile()d with the current directory set to its containing dir # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['reno.sphinxext', 'openstackdocstheme'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2016, Watcher developers' # Release notes are version independent # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # openstackdocstheme options openstackdocs_repo_name = 'openstack/watcher' openstackdocs_bug_project = 'watcher' openstackdocs_bug_tag = '' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'watcherdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]) latex_documents = [ ('index', 'watcher.tex', 'Watcher Documentation', 'Watcher developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'watcher', 'Watcher Documentation', ['Watcher developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'watcher', 'Watcher Documentation', 'Watcher developers', 'watcher', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. 
# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/index.rst0000664000175000017500000000166500000000000022517 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================= Welcome to watcher's Release Notes documentation! ================================================= Contents: .. toctree:: :maxdepth: 1 unreleased 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.5991352 python_watcher-14.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000022105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.5991352 python_watcher-14.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000023057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000024644 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000013511700000000000027705 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. 
#zanata msgid "" msgstr "" "Project-Id-Version: python-watcher\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2025-01-10 00:32+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-04-18 12:21+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "0.29.0" msgstr "0.29.0" msgid "0.34.0" msgstr "0.34.0" msgid "1.0.0" msgstr "1.0.0" msgid "1.1.0" msgstr "1.1.0" msgid "1.10.0" msgstr "1.10.0" msgid "1.11.0" msgstr "1.11.0" msgid "1.3.0" msgstr "1.3.0" msgid "1.4.0" msgstr "1.4.0" msgid "1.4.1" msgstr "1.4.1" msgid "1.5.0" msgstr "1.5.0" msgid "1.6.0" msgstr "1.6.0" msgid "1.7.0" msgstr "1.7.0" msgid "1.9.0" msgstr "1.9.0" msgid "2.0.0" msgstr "2.0.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.0.0rc1" msgstr "3.0.0.0rc1" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.0.0rc1" msgstr "4.0.0.0rc1" msgid "6.0.0" msgstr "6.0.0" msgid "A ``watcher-status upgrade check`` has been added for this." msgstr "A ``watcher-status upgrade check`` has been added for this." msgid "" "A new threadpool for the decision engine that contributors can use to " "improve the performance of many operations, primarily I/O bound onces. The " "amount of workers used by the decision engine threadpool can be configured " "to scale according to the available infrastructure using the " "`watcher_decision_engine.max_general_workers` config option. Documentation " "for contributors to effectively use this threadpool is available online: " "https://docs.openstack.org/watcher/latest/contributor/concurrency.html" msgstr "" "A new threadpool for the decision engine that contributors can use to " "improve the performance of many operations, primarily I/O bound onces. The " "amount of workers used by the decision engine threadpool can be configured " "to scale according to the available infrastructure using the " "`watcher_decision_engine.max_general_workers` config option. Documentation " "for contributors to effectively use this threadpool is available online: " "https://docs.openstack.org/watcher/latest/contributor/concurrency.html" msgid "" "API calls while building the Compute data model will be retried upon " "failure. The amount of failures allowed before giving up and the time before " "reattempting are configurable. The `api_call_retries` and " "`api_query_timeout` parameters in the `[collector]` group can be used to " "adjust these paremeters. 10 retries with a 1 second time in between " "reattempts is the default." msgstr "" "API calls while building the Compute data model will be retried upon " "failure. The amount of failures allowed before giving up and the time before " "reattempting are configurable. The `api_call_retries` and " "`api_query_timeout` parameters in the `[collector]` group can be used to " "adjust these parameters. 10 retries with a 1 second time in between " "reattempts is the default." msgid "" "Add a new webhook API and a new audit type EVENT, the microversion is 1.4. " "Now Watcher user can create audit with EVENT type and the audit will be " "triggered by webhook API. The user guide is available online: https://docs." 
"openstack.org/watcher/latest/user/event_type_audit.html" msgstr "" "Add a new webhook API and a new audit type EVENT, the microversion is 1.4. " "Now Watcher user can create audit with EVENT type and the audit will be " "triggered by webhook API. The user guide is available online: https://docs." "openstack.org/watcher/latest/user/event_type_audit.html" msgid "Add a service supervisor to watch Watcher deamons." msgstr "Add a service supervisor to watch Watcher daemons." msgid "Add action for compute node power on/off" msgstr "Add action for compute node power on/off" msgid "" "Add description property for dynamic action. Admin can see detail " "information of any specify action." msgstr "" "Add description property for dynamic action. Admin can see detail " "information of any specify action." msgid "" "Add force field to Audit. User can set --force to enable the new option when " "launching audit. If force is True, audit will be executed despite of ongoing " "actionplan. The new audit may create a wrong actionplan if they use the same " "data model." msgstr "" "Add force field to Audit. User can set --force to enable the new option when " "launching audit. If force is True, audit will be executed despite of ongoing " "actionplan. The new audit may create a wrong actionplan if they use the same " "data model." msgid "" "Add keystone_client Group for user to configure 'interface' and " "'region_name' by watcher.conf. The default value of 'interface' is 'admin'." msgstr "" "Add keystone_client Group for user to configure 'interface' and " "'region_name' by watcher.conf. The default value of 'interface' is 'admin'." msgid "Add notifications related to Action object." msgstr "Add notifications related to Action object." msgid "Add notifications related to Action plan object." msgstr "Add notifications related to Action plan object." msgid "Add notifications related to Audit object." msgstr "Add notifications related to Audit object." msgid "Add notifications related to Service object." msgstr "Add notifications related to Service object." msgid "" "Add show data model api for Watcher. New in version 1.3. User can use " "'openstack optimize datamodel list' command to view the current data model " "information in memory. User can also add '--audit ' to view " "specific data model in memory filted by the scope in audit. User can also " "add '--detail' to view detailed information about current data model. User " "can also add '--type ' to specify the type of data model. Default type " "is 'compute'. In the future, type 'storage' and 'baremetal' will be " "supported." msgstr "" "Add show data model API for Watcher. New in version 1.3. User can use " "'openstack optimize datamodel list' command to view the current data model " "information in memory. User can also add '--audit ' to view " "specific data model in memory filtered by the scope in audit. User can also " "add '--detail' to view detailed information about current data model. User " "can also add '--type ' to specify the type of data model. Default type " "is 'compute'. In the future, type 'storage' and 'baremetal' will be " "supported." msgid "" "Add start_time and end_time fields in audits table. User can set the start " "time and/or end time when creating CONTINUOUS audit." msgstr "" "Add start_time and end_time fields in audits table. User can set the start " "time and/or end time when creating CONTINUOUS audit." msgid "" "Add superseded state for an action plan if the cluster data model has " "changed after it has been created." 
msgstr "" "Add superseded state for an action plan if the cluster data model has " "changed after it has been created." msgid "" "Added Placement API helper to Watcher. Now Watcher can get information about " "resource providers, it can be used for the data model and strategies. Config " "group placement_client with options 'api_version', 'interface' and " "'region_name' is also added. The default values for 'api_version' and " "'interface' are 1.29 and 'public', respectively." msgstr "" "Added Placement API helper to Watcher. Now Watcher can get information about " "resource providers, it can be used for the data model and strategies. Config " "group placement_client with options 'api_version', 'interface' and " "'region_name' is also added. The default values for 'api_version' and " "'interface' are 1.29 and 'public', respectively." msgid "Added SUSPENDED audit state" msgstr "Added SUSPENDED audit state" msgid "" "Added a generic scoring engine module, which will standardize interactions " "with scoring engines through the common API. It is possible to use the " "scoring engine by different Strategies, which improve the code and data " "model re-use." msgstr "" "Added a generic scoring engine module, which will standardize interactions " "with scoring engines through the common API. It is possible to use the " "scoring engine by different Strategies, which improve the code and data " "model re-use." msgid "" "Added a generic scoring engine module, which will standarize interactions " "with scoring engines through the common API. It is possible to use the " "scoring engine by different Strategies, which improve the code and data " "model re-use." msgstr "" "Added a generic scoring engine module, which will standardise interactions " "with scoring engines through the common API. It is possible to use the " "scoring engine by different Strategies, which improve the code and data " "model re-use." msgid "" "Added a new config option 'action_execution_rule' which is a dict type. Its " "key field is strategy name and the value is 'ALWAYS' or 'ANY'. 'ALWAYS' " "means the callback function returns True as usual. 'ANY' means the return " "depends on the result of previous action execution. The callback returns " "True if previous action gets failed, and the engine continues to run the " "next action. If previous action executes success, the callback returns False " "then the next action will be ignored. For strategies that aren't in " "'action_execution_rule', the callback always returns True. Please add the " "next section in the watcher.conf file if your strategy needs this feature. " "[watcher_workflow_engines.taskflow] action_execution_rule = {'your strategy " "name': 'ANY'}" msgstr "" "Added a new config option 'action_execution_rule' which is a dict type. Its " "key field is strategy name and the value is 'ALWAYS' or 'ANY'. 'ALWAYS' " "means the callback function returns True as usual. 'ANY' means the return " "depends on the result of previous action execution. The callback returns " "True if previous action gets failed, and the engine continues to run the " "next action. If previous action executes success, the callback returns False " "then the next action will be ignored. For strategies that aren't in " "'action_execution_rule', the callback always returns True. Please add the " "next section in the watcher.conf file if your strategy needs this feature. 
" "[watcher_workflow_engines.taskflow] action_execution_rule = {'your strategy " "name': 'ANY'}" msgid "" "Added a new strategy based on the airflow of servers. This strategy makes " "decisions to migrate VMs to make the airflow uniform." msgstr "" "Added a new strategy based on the airflow of servers. This strategy makes " "decisions to migrate VMs to make the airflow uniform." msgid "" "Added a standard way to both declare and fetch configuration options so that " "whenever the administrator generates the Watcher configuration sample file, " "it contains the configuration options of the plugins that are currently " "available." msgstr "" "Added a standard way to both declare and fetch configuration options so that " "whenever the administrator generates the Watcher configuration sample file, " "it contains the configuration options of the plugins that are currently " "available." msgid "" "Added a strategy based on the VM workloads of hypervisors. This strategy " "makes decisions to migrate workloads to make the total VM workloads of each " "hypervisor balanced, when the total VM workloads of hypervisor reaches " "threshold." msgstr "" "Added a strategy based on the VM workloads of hypervisors. This strategy " "makes decisions to migrate workloads to make the total VM workloads of each " "hypervisor balanced, when the total VM workloads of hypervisor reaches " "threshold." msgid "" "Added a strategy for one compute node maintenance, without having the user's " "application been interrupted. If given one backup node, the strategy will " "firstly migrate all instances from the maintenance node to the backup node. " "If the backup node is not provided, it will migrate all instances, relying " "on nova-scheduler." msgstr "" "Added a strategy for one compute node maintenance, without having the user's " "application been interrupted. If given one backup node, the strategy will " "firstly migrate all instances from the maintenance node to the backup node. " "If the backup node is not provided, it will migrate all instances, relying " "on nova-scheduler." msgid "" "Added a strategy that monitors if there is a higher load on some hosts " "compared to other hosts in the cluster and re-balances the work across hosts " "to minimize the standard deviation of the loads in the cluster." msgstr "" "Added a strategy that monitors if there is a higher load on some hosts " "compared to other hosts in the cluster and re-balances the work across hosts " "to minimise the standard deviation of the loads in the cluster." msgid "" "Added a way to add a new action without having to amend the source code of " "the default planner." msgstr "" "Added a way to add a new action without having to amend the source code of " "the default planner." msgid "" "Added a way to check state of strategy before audit's execution. " "Administrator can use \"watcher strategy state \" command to " "get information about metrics' availability, datasource's availability and " "CDM's availability." msgstr "" "Added a way to check state of strategy before audit's execution. " "Administrator can use \"watcher strategy state \" command to " "get information about metrics' availability, datasource's availability and " "CDM's availability." msgid "" "Added a way to compare the efficacy of different strategies for a give " "optimization goal." msgstr "" "Added a way to compare the efficacy of different strategies for a give " "optimisation goal." 
msgid "" "Added a way to create periodic audit to be able to optimize continuously the " "cloud infrastructure." msgstr "" "Added a way to create periodic audit to be able to continuously optimise the " "cloud infrastructure." msgid "" "Added a way to return the of available goals depending on which strategies " "have been deployed on the node where the decision engine is running." msgstr "" "Added a way to return the of available goals depending on which strategies " "have been deployed on the node where the decision engine is running." msgid "" "Added a way to return the of available goals depending on which strategies " "have been deployed on the node where the decison engine is running." msgstr "" "Added a way to return the of available goals depending on which strategies " "have been deployed on the node where the decision engine is running." msgid "" "Added an in-memory cache of the cluster model built up and kept fresh via " "notifications from services of interest in addition to periodic syncing " "logic." msgstr "" "Added an in-memory cache of the cluster model built up and kept fresh via " "notifications from services of interest in addition to periodic syncing " "logic." msgid "" "Added binding between apscheduler job and Watcher decision engine service. " "It will allow to provide HA support in the future." msgstr "" "Added binding between apscheduler job and Watcher decision engine service. " "It will allow to provide HA support in the future." msgid "Added cinder cluster data model" msgstr "Added cinder cluster data model" msgid "" "Added gnocchi support as data source for metrics. Administrator can change " "data source for each strategy using config file." msgstr "" "Added Gnocchi support as data source for metrics. Administrator can change " "data source for each strategy using config file." msgid "Added new tool ``watcher-status upgrade check``." msgstr "Added new tool ``watcher-status upgrade check``." msgid "" "Added notifications about cancelling of action plan. Now event based plugins " "know when action plan cancel started and completed." msgstr "" "Added notifications about cancelling of action plan. Now event based plugins " "know when action plan cancel started and completed." msgid "Added policies to handle user rights to access Watcher API." msgstr "Added policies to handle user rights to access Watcher API." msgid "Added storage capacity balance strategy." msgstr "Added storage capacity balance strategy." msgid "" "Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". " "The strategy migrates many instances and volumes efficiently with minimum " "downtime automatically." msgstr "" "Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". " "The strategy migrates many instances and volumes efficiently with minimum " "downtime automatically." msgid "" "Added strategy \"node resource consolidation\". This strategy is used to " "centralize VMs to as few nodes as possible by VM migration. User can set an " "input parameter to decide how to select the destination node." msgstr "" "Added strategy \"node resource consolidation\". This strategy is used to " "centralize VMs to as few nodes as possible by VM migration. User can set an " "input parameter to decide how to select the destination node." msgid "" "Added strategy to identify and migrate a Noisy Neighbor - a low priority VM " "that negatively affects peformance of a high priority VM by over utilizing " "Last Level Cache." 
msgstr "" "Added strategy to identify and migrate a Noisy Neighbour - a low priority VM " "that negatively affects performance of a high priority VM by over utilising " "Last Level Cache." msgid "" "Added strategy to identify and migrate a Noisy Neighbor - a low priority VM " "that negatively affects performance of a high priority VM by over utilizing " "Last Level Cache." msgstr "" "Added strategy to identify and migrate a Noisy Neighbour - a low-priority VM " "that negatively affects the performance of a high-priority VM by over " "utilising Last Level Cache." msgid "" "Added the functionality to filter out instances which have metadata field " "'optimize' set to False. For now, this is only available for the " "basic_consolidation strategy (if \"check_optimize_metadata\" configuration " "option is enabled)." msgstr "" "Added the functionality to filter out instances which have metadata field " "'optimize' set to False. For now, this is only available for the " "basic_consolidation strategy (if \"check_optimize_metadata\" configuration " "option is enabled)." msgid "Added using of JSONSchema instead of voluptuous to validate Actions." msgstr "Added using of JSONSchema instead of voluptuous to validate Actions." msgid "Added volume migrate action" msgstr "Added volume migrate action" msgid "" "Adds audit scoper for storage data model, now watcher users can specify " "audit scope for storage CDM in the same manner as compute scope." msgstr "" "Adds audit scoper for storage data model, now watcher users can specify " "audit scope for storage CDM in the same manner as compute scope." msgid "Adds baremetal data model in Watcher" msgstr "Adds baremetal data model in Watcher" msgid "" "All datasources can now be configured to retry retrieving a metric upon " "encountering an error. Between each attempt will be a set amount of time " "which can be adjusted from the configuration. These configuration options " "can be found in the `[watcher_datasources]` group and are named " "`query_max_retries` and `query_timeout`." msgstr "" "All datasources can now be configured to retry retrieving a metric upon " "encountering an error. Between each attempt will be a set amount of time " "which can be adjusted from the configuration. These configuration options " "can be found in the `[watcher_datasources]` group and are named " "`query_max_retries` and `query_timeout`." msgid "" "Allow decision engine to pass strategy parameters, like optimization " "threshold, to selected strategy, also strategy to provide parameters info to " "end user." msgstr "" "Allow decision engine to pass strategy parameters, like optimisation " "threshold, to selected strategy, also strategy to provide parameters info to " "end user." msgid "" "Allow using file to override metric map. Override the metric map of each " "datasource as soon as it is created by the manager. This override comes from " "a file whose path is provided by a setting in config file. The setting is " "`watcher_decision_engine/metric_map_path`. The file contains a map per " "datasource whose keys are the metric names as recognized by watcher and the " "value is the real name of the metric in the datasource. This setting " "defaults to `/etc/watcher/metric_map.yaml`, and presence of this file is " "optional." msgstr "" "Allow using file to override metric map. Override the metric map of each " "datasource as soon as it is created by the manager. This override comes from " "a file whose path is provided by a setting in config file. 
The setting is " "`watcher_decision_engine/metric_map_path`. The file contains a map per " "datasource whose keys are the metric names as recognized by watcher and the " "value is the real name of the metric in the datasource. This setting " "defaults to `/etc/watcher/metric_map.yaml`, and presence of this file is " "optional." msgid "" "An Watcher API WSGI application script ``watcher-api-wsgi`` is now " "available. It is auto-generated by ``pbr`` and allows to run the API service " "using WSGI server (for example Nginx and uWSGI)." msgstr "" "An Watcher API WSGI application script ``watcher-api-wsgi`` is now " "available. It is auto-generated by ``pbr`` and allows to run the API service " "using WSGI server (for example Nginx and uWSGI)." msgid "" "Audits have 'name' field now, that is more friendly to end users. Audit's " "name can't exceed 63 characters." msgstr "" "Audits have 'name' field now, that is more friendly to end users. Audit's " "name can't exceed 63 characters." msgid "" "Baremetal Model gets Audit scoper with an ability to exclude Ironic nodes." msgstr "" "Baremetal Model gets Audit scope with an ability to exclude Ironic nodes." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Ceilometer Datasource has been deprecated since its API has been deprecated " "in Ocata cycle. Watcher has supported Ceilometer for some releases after " "Ocata to let users migrate to Gnocchi/Monasca datasources. Since Train " "release, Ceilometer support will be removed." msgstr "" "Ceilometer Datasource has been deprecated since its API has been deprecated " "in Ocata cycle. Watcher has supported Ceilometer for some releases after " "Ocata to let users migrate to Gnocchi/Monasca datasources. Since Train " "release, Ceilometer support will be removed." msgid "Centralize all configuration options for Watcher." msgstr "Centralise all configuration options for Watcher." msgid "" "Check the creation time of the action plan, and set its state to SUPERSEDED " "if it has expired." msgstr "" "Check the creation time of the action plan, and set its state to SUPERSEDED " "if it has expired." msgid "Contents:" msgstr "Contents:" msgid "" "Copy all audit templates parameters into audit instead of having a reference " "to the audit template." msgstr "" "Copy all audit templates parameters into audit instead of having a reference " "to the audit template." msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Each CDM collector can have its own CDM scoper now. This changed Scope JSON " "schema definition for the audit template POST data. Please see audit " "template create help message in python-watcherclient." msgstr "" "Each CDM collector can have its own CDM scoper now. This changed Scope JSON " "schema definition for the audit template POST data. Please see audit " "template create help message in python-watcherclient." msgid "" "Enhancement of vm_workload_consolidation strategy by using 'memory.resident' " "metric in place of 'memory.usage', as memory.usage shows the memory usage " "inside guest-os and memory.resident represents volume of RAM used by " "instance on host machine." msgstr "" "Enhancement of vm_workload_consolidation strategy by using 'memory.resident' " "metric in place of 'memory.usage', as memory.usage shows the memory usage " "inside guest-os and memory.resident represents volume of RAM used by " "instance on host machine." msgid "" "Existing workload_balance strategy based on the VM workloads of CPU. 
This " "feature improves the strategy. By the input parameter \"metrics\", it makes " "decision to migrate a VM base on CPU or memory utilization." msgstr "" "Existing workload_balance strategy based on the VM workloads of CPU. This " "feature improves the strategy. By the input parameter \"metrics\", it makes " "decision to migrate a VM base on CPU or memory utilisation." msgid "" "Feature to exclude instances from audit scope based on project_id is added. " "Now instances from particular project in OpenStack can be excluded from " "audit defining scope in audit templates." msgstr "" "Feature to exclude instances from audit scope based on project_id is added. " "Now instances from particular project in OpenStack can be excluded from " "audit defining scope in audit templates." msgid "" "For a large cloud infrastructure, retrieving data from Nova may take a long " "time. To avoid getting too much data from Nova, building the compute data " "model according to the scope of audit." msgstr "" "For a large cloud infrastructure, retrieving data from Nova may take a long " "time. To avoid getting too much data from Nova, building the compute data " "model according to the scope of audit." msgid "" "Grafana has been added as datasource that can be used for collecting " "metrics. The configuration options allow to specify what metrics and how " "they are stored in grafana so that no matter how Grafana is configured it " "can still be used. The configuration can be done via the typical " "configuration file but it is recommended to configure most options in the " "yaml file for metrics. For a complete walkthrough on configuring Grafana " "see: https://docs.openstack.org/watcher/latest/datasources/grafana.html" msgstr "" "Grafana has been added as datasource that can be used for collecting " "metrics. The configuration options allow to specify what metrics and how " "they are stored in Grafana so that no matter how Grafana is configured it " "can still be used. The configuration can be done via the typical " "configuration file but it is recommended to configure most options in the " "yaml file for metrics. For a complete walkthrough on configuring Grafana " "see: https://docs.openstack.org/watcher/latest/datasources/grafana.html" msgid "" "If Gnocchi was configured to have a custom amount of retries and or a custom " "timeout then the configuration needs to moved into the " "`[watcher_datasources]` group instead of the `[gnocchi_client]` group." msgstr "" "If Gnocchi was configured to have a custom amount of retries and or a custom " "timeout then the configuration needs to moved into the " "`[watcher_datasources]` group instead of the `[gnocchi_client]` group." msgid "" "Improved interface for datasource baseclass that better defines expected " "values and types for parameters and return types of all abstract methods. " "This allows all strategies to work with every datasource provided the " "metrics are configured for that given datasource." msgstr "" "Improved interface for datasource baseclass that better defines expected " "values and types for parameters and return types of all abstract methods. " "This allows all strategies to work with every datasource provided the " "metrics are configured for that given datasource." msgid "" "Instance cold migration logic is now replaced with using Nova migrate " "Server(migrate Action) API which has host option since v2.56." 
msgstr "" "Instance cold migration logic is now replaced with using Nova migrate " "Server(migrate Action) API which has host option since v2.56." msgid "" "Many operations in the decision engine will block on I/O. Such I/O " "operations can stall the execution of a sequential application " "significantly. To reduce the potential bottleneck of many operations the " "general purpose decision engine threadpool is introduced." msgstr "" "Many operations in the decision engine will block on I/O. Such I/O " "operations can stall the execution of a sequential application " "significantly. To reduce the potential bottleneck of many operations the " "general purpose decision engine threadpool is introduced." msgid "New Features" msgstr "New Features" msgid "" "New framework for ``watcher-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Watcher " "upgrade to ensure if the upgrade can be performed safely." msgstr "" "New framework for ``watcher-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Watcher " "upgrade to ensure if the upgrade can be performed safely." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Nova API version is now set to 2.56 by default. This needs the migrate " "action of migration type cold with destination_node parameter to work." msgstr "" "Nova API version is now set to 2.56 by default. This needs the migrate " "action of migration type cold with destination_node parameter to work." msgid "" "Now Watcher strategy can select specific planner beyond default. Strategy " "can set planner property to specify its own planner." msgstr "" "Now Watcher strategy can select specific planner beyond default. Strategy " "can set planner property to specify its own planner." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "Operator can now use new CLI tool ``watcher-status upgrade check`` to check " "if Watcher deployment can be safely upgraded from N-1 to N release." msgstr "" "Operator can now use new CLI tool ``watcher-status upgrade check`` to check " "if Watcher deployment can be safely upgraded from N-1 to N release." msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Prelude" msgstr "Prelude" msgid "" "Provide a notification mechanism into Watcher that supports versioning. " "Whenever a Watcher object is created, updated or deleted, a versioned " "notification will, if it's relevant, be automatically sent to notify in " "order to allow an event-driven style of architecture within Watcher. " "Moreover, it will also give other services and/or 3rd party softwares (e.g. " "monitoring solutions or rules engines) the ability to react to such events." msgstr "" "Provide a notification mechanism into Watcher that supports versioning. " "Whenever a Watcher object is created, updated or deleted, a versioned " "notification will, if it's relevant, be automatically sent to notify in " "order to allow an event-driven style of architecture within Watcher. " "Moreover, it will also give other services and/or 3rd party software (e.g. " "monitoring solutions or rules engines) the ability to react to such events." msgid "" "Provides a generic way to define the scope of an audit. The set of audited " "resources will be called \"Audit scope\" and will be defined in each audit " "template (which contains the audit settings)." 
msgstr "" "Provides a generic way to define the scope of an audit. The set of audited " "resources will be called \"Audit scope\" and will be defined in each audit " "template (which contains the audit settings)." msgid "" "Python 2.7 support has been dropped. Last release of Watcher to support " "py2.7 is OpenStack Train. The minimum version of Python now supported by " "Watcher is Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of Watcher to support " "py2.7 is OpenStack Train. The minimum version of Python now supported by " "Watcher is Python 3.6." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Several strategies have changed the `node` parameter to `compute_node` to be " "better aligned with terminology. These strategies include " "`basic_consolidation` and `workload_stabilzation`. The `node` parameter will " "remain supported during Train release and will be removed in the subsequent " "release." msgstr "" "Several strategies have changed the `node` parameter to `compute_node` to be " "better aligned with terminology. These strategies include " "`basic_consolidation` and `workload_stabilzation`. The `node` parameter will " "remain supported during Train release and will be removed in the subsequent " "release." msgid "" "Specific strategies can override this order and use datasources which are " "not listed in the global preference." msgstr "" "Specific strategies can override this order and use datasources which are " "not listed in the global preference." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "The building of the compute (Nova) data model will be done using the " "decision engine threadpool, thereby, significantly reducing the total time " "required to build it." msgstr "" "The building of the compute (Nova) data model will be done using the " "decision engine threadpool, thereby, significantly reducing the total time " "required to build it." msgid "" "The configuration options for query retries in `[gnocchi_client]` are " "deprecated and the option in `[watcher_datasources]` should now be used." msgstr "" "The configuration options for query retries in `[gnocchi_client]` are " "deprecated and the option in `[watcher_datasources]` should now be used." msgid "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customized or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in backward compatible way." msgstr "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customized or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in backward compatible way." msgid "" "The graph model describes how VMs are associated to compute hosts. This " "allows for seeing relationships upfront between the entities and hence can " "be used to identify hot/cold spots in the data center and influence a " "strategy decision." 
msgstr "" "The graph model describes how VMs are associated to compute hosts. This " "allows for seeing relationships upfront between the entities and hence can " "be used to identify hot/cold spots in the data centre and influence a " "strategy decision." msgid "" "The migrate action of migration type cold with destination_node parameter " "was fixed. Before fixing, it booted an instance in the service project as a " "migrated instance." msgstr "" "The migrate action of migration type cold with destination_node parameter " "was fixed. Before fixing, it booted an instance in the service project as a " "migrated instance." msgid "" "The minimum required version of the ``[nova_client]/api_version`` value is " "now enforced to be ``2.56`` which is available since the Queens version of " "the nova compute service." msgstr "" "The minimum required version of the ``[nova_client]/api_version`` value is " "now enforced to be ``2.56`` which is available since the Queens version of " "the Nova compute service." msgid "" "The new strategy baseclass has significant changes in method parameters and " "any out-of-tree strategies will have to be adopted." msgstr "" "The new strategy baseclass has significant changes in method parameters and " "any out-of-tree strategies will have to be adopted." msgid "" "There is new ability to create Watcher continuous audits with cron interval. " "It means you may use, for example, optional argument '--interval \"\\*/5 \\* " "\\* \\* \\*\"' to launch audit every 5 minutes. These jobs are executed on a " "best effort basis and therefore, we recommend you to use a minimal cron " "interval of at least one minute." msgstr "" "There is new ability to create Watcher continuous audits with cron interval. " "It means you may use, for example, optional argument '--interval \"\\*/5 \\* " "\\* \\* \\*\"' to launch audit every 5 minutes. These jobs are executed on a " "best effort basis and therefore, we recommend you to use a minimal cron " "interval of at least one minute." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgstr "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgid "" "Using ``watcher/api/app.wsgi`` script is deprecated and it will be removed " "in U release. Please switch to automatically generated ``watcher-api-wsgi`` " "script instead." msgstr "" "Using ``watcher/api/app.wsgi`` script is deprecated and it will be removed " "in U release. Please switch to automatically generated ``watcher-api-wsgi`` " "script instead." 
msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "Watcher can continuously optimize the OpenStack cloud for a specific " "strategy or goal by triggering an audit periodically which generates an " "action plan and run it automatically." msgstr "" "Watcher can continuously optimise the OpenStack cloud for a specific " "strategy or goal by triggering an audit periodically which generates an " "action plan and run it automatically." msgid "" "Watcher can get resource information such as total, allocation ratio and " "reserved information from Placement API. Now we add some new fields to the " "Watcher Data Model:" msgstr "" "Watcher can get resource information such as total, allocation ratio and " "reserved information from Placement API. Now we add some new fields to the " "Watcher Data Model:" msgid "" "Watcher can now run specific actions in parallel improving the performances " "dramatically when executing an action plan." msgstr "" "Watcher can now run specific actions in parallel improving the performance " "dramatically when executing an action plan." msgid "" "Watcher consumes Nova notifications to update its internal Compute " "CDM(Cluster Data Model). All the notifications as below" msgstr "" "Watcher consumes Nova notifications to update its internal Compute " "CDM(Cluster Data Model). All the notifications as below" msgid "Watcher database can now be upgraded thanks to Alembic." msgstr "Watcher database can now be upgraded thanks to Alembic." msgid "" "Watcher got an ability to calculate multiple global efficacy indicators " "during audit's execution. Now global efficacy can be calculated for many " "resource types (like volumes, instances, network) if strategy supports " "efficacy indicators." msgstr "" "Watcher got an ability to calculate multiple global efficacy indicators " "during audit's execution. Now global efficacy can be calculated for many " "resource types (like volumes, instances, network) if strategy supports " "efficacy indicators." msgid "" "Watcher has a whole scope of the cluster, when building compute CDM which " "includes all instances. It filters excluded instances when migration during " "the audit." msgstr "" "Watcher has a whole scope of the cluster, when building compute CDM which " "includes all instances. It filters excluded instances when migration during " "the audit." msgid "" "Watcher now supports configuring which datasource to use and in which order. " "This configuration is done by specifying datasources in the " "watcher_datasources section:" msgstr "" "Watcher now supports configuring which datasource to use and in which order. " "This configuration is done by specifying datasources in the " "watcher_datasources section:" msgid "" "Watcher removes the support to Nova legacy notifications because of Nova " "will deprecate them." msgstr "" "Watcher removes the support to Nova legacy notifications because of Nova " "will deprecate them." msgid "" "Watcher services can be launched in HA mode. From now on Watcher Decision " "Engine and Watcher Applier services may be deployed on different nodes to " "run in active-active or active-passive mode. Any ONGOING Audits or Action " "Plans will be CANCELLED if service they are executed on is restarted." msgstr "" "Watcher services can be launched in HA mode. 
From now on Watcher Decision " "Engine and Watcher Applier services may be deployed on different nodes to " "run in active-active or active-passive mode. Any ONGOING Audits or Action " "Plans will be CANCELLED if the service they are executed on is restarted." msgid "" "Watcher starts to support API microversions since the Stein cycle. From now " "onwards all API changes should be made while preserving backward " "compatibility. To specify the API version, an operator should use the " "OpenStack-API-Version HTTP header. If an operator wants to know the minimum " "and maximum versions supported by the API, they can access the /v1 resource " "and the Watcher API will return appropriate headers in the response." msgstr "" "Watcher starts to support API microversions since the Stein cycle. From now " "onwards all API changes should be made while preserving backward " "compatibility. To specify the API version, an operator should use the " "OpenStack-API-Version HTTP header. If an operator wants to know the minimum " "and maximum versions supported by the API, they can access the /v1 resource " "and the Watcher API will return appropriate headers in the response." msgid "" "Watcher supports multiple metrics backends and relies on Ceilometer and " "Monasca." msgstr "" "Watcher supports multiple metrics backends and relies on Ceilometer and " "Monasca." msgid "We also add some new properties:" msgstr "We also add some new properties:" msgid "Welcome to watcher's Release Notes documentation!" msgstr "Welcome to watcher's Release Notes documentation!" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "``[watcher_datasources] datasources = gnocchi,monasca,ceilometer``" msgstr "``[watcher_datasources] datasources = gnocchi,monasca,ceilometer``" msgid "" "all Watcher objects have been refactored to support OVO (oslo." "versionedobjects) which was a prerequisite step in order to implement " "versioned notifications." msgstr "" "all Watcher objects have been refactored to support OVO (oslo." "versionedobjects) which was a prerequisite step in order to implement " "versioned notifications." msgid "" "disk_gb_capacity: The amount of disk, taking the allocation ratio into " "account but not including reserved." msgstr "" "disk_gb_capacity: The amount of disk, taking the allocation ratio into " "account but not including reserved." msgid "" "disk_gb_reserved: The amount of disk a node has reserved for its own use." msgstr "" "disk_gb_reserved: The amount of disk a node has reserved for its own use." msgid "disk_ratio: Disk allocation ratio." msgstr "disk_ratio: Disk allocation ratio."
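A minimal sketch of one plausible reading of the ``*_capacity`` fields described in the surrounding entries, assuming the usual Placement convention that the reserved amount is excluded before the allocation ratio is applied; all numbers below are made up for illustration::

    # Hedged sketch: capacity takes the allocation ratio into account but
    # excludes the reserved amount. Values are illustrative only.
    vcpus_total = 32     # physical CPUs reported for the node
    vcpu_reserved = 2    # CPUs the node keeps for its own use
    vcpu_ratio = 4.0     # CPU allocation (overcommit) ratio

    vcpu_capacity = (vcpus_total - vcpu_reserved) * vcpu_ratio  # -> 120.0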
msgid "instance.create.end" msgstr "instance.create.end" msgid "instance.delete.end" msgstr "instance.delete.end" msgid "instance.live_migration_force_complete.end" msgstr "instance.live_migration_force_complete.end" msgid "instance.live_migration_post_dest.end" msgstr "instance.live_migration_post_dest.end" msgid "instance.lock" msgstr "instance.lock" msgid "instance.pause.end" msgstr "instance.pause.end" msgid "instance.power_off.end" msgstr "instance.power_off.end" msgid "instance.power_on.end" msgstr "instance.power_on.end" msgid "instance.rebuild.end" msgstr "instance.rebuild.end" msgid "instance.rescue.end" msgstr "instance.rescue.end" msgid "instance.resize_confirm.end" msgstr "instance.resize_confirm.end" msgid "instance.restore.end" msgstr "instance.restore.end" msgid "instance.resume.end" msgstr "instance.resume.end" msgid "instance.shelve.end" msgstr "instance.shelve.end" msgid "instance.shutdown.end" msgstr "instance.shutdown.end" msgid "instance.soft_delete.end" msgstr "instance.soft_delete.end" msgid "instance.suspend.end" msgstr "instance.suspend.end" msgid "instance.unlock" msgstr "instance.unlock" msgid "instance.unpause.end" msgstr "instance.unpause.end" msgid "instance.unrescue.end" msgstr "instance.unrescue.end" msgid "instance.unshelve.end" msgstr "instance.unshelve.end" msgid "instance.update" msgstr "instance.update" msgid "" "memory_mb_capacity: The amount of memory, take allocation ratio into " "account, but do not include reserved." msgstr "" "memory_mb_capacity: The amount of memory, take allocation ratio into " "account, but do not include reserved." msgid "" "memory_mb_reserved: The amount of memory a node has reserved for its own use." msgstr "" "memory_mb_reserved: The amount of memory a node has reserved for its own use." msgid "memory_ratio: Memory allocation ratio." msgstr "memory_ratio: Memory allocation ratio." msgid "new:" msgstr "new:" msgid "pre-existing:" msgstr "pre-existing:" msgid "service.create" msgstr "service.create" msgid "service.delete" msgstr "service.delete" msgid "service.update" msgstr "service.update" msgid "" "vcpu_capacity: The amount of vcpu, take allocation ratio into account, but " "do not include reserved." msgstr "" "vcpu_capacity: The amount of vcpu, take allocation ratio into account, but " "do not include reserved." msgid "vcpu_ratio: CPU allocation ratio." msgstr "vcpu_ratio: CPU allocation ratio." msgid "vcpu_reserved: The amount of cpu a node has reserved for its own use." msgstr "vcpu_reserved: The amount of CPU a node has reserved for its own use." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/newton.rst0000664000175000017500000000023200000000000022707 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000022462 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. 
release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000022330 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000022675 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000022522 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000022515 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000022521 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000016000000000000023524 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000022724 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000023212 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000023030 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. 
release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000022323 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000022327 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000022164 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/requirements.txt0000664000175000017500000000325000000000000020141 0ustar00zuulzuul00000000000000# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. apscheduler>=3.5.1 # MIT License eventlet>=0.27.0 # MIT jsonpatch>=1.21 # BSD keystoneauth1>=3.4.0 # Apache-2.0 jsonschema>=3.2.0 # MIT keystonemiddleware>=4.21.0 # Apache-2.0 lxml>=4.5.1 # BSD croniter>=0.3.20 # MIT License os-resource-classes>=0.4.0 oslo.concurrency>=3.26.0 # Apache-2.0 oslo.cache>=1.29.0 # Apache-2.0 oslo.config>=6.8.0 # Apache-2.0 oslo.context>=2.21.0 # Apache-2.0 oslo.db>=4.44.0 # Apache-2.0 oslo.i18n>=3.20.0 # Apache-2.0 oslo.log>=3.37.0 # Apache-2.0 oslo.messaging>=14.1.0 # Apache-2.0 oslo.policy>=4.5.0 # Apache-2.0 oslo.reports>=1.27.0 # Apache-2.0 oslo.serialization>=2.25.0 # Apache-2.0 oslo.service>=1.30.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.utils>=7.0.0 # Apache-2.0 oslo.versionedobjects>=1.32.0 # Apache-2.0 PasteDeploy>=1.5.2 # MIT pbr>=3.1.1 # Apache-2.0 pecan>=1.3.2 # BSD PrettyTable>=0.7.2 # BSD gnocchiclient>=7.0.1 # Apache-2.0 python-cinderclient>=3.5.0 # Apache-2.0 python-glanceclient>=2.9.1 # Apache-2.0 python-keystoneclient>=3.15.0 # Apache-2.0 python-monascaclient>=1.12.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 python-novaclient>=14.1.0 # Apache-2.0 python-observabilityclient>=0.3.0 # Apache-2.0 python-openstackclient>=3.14.0 # Apache-2.0 python-ironicclient>=2.5.0 # Apache-2.0 SQLAlchemy>=1.2.5 # MIT stevedore>=1.28.0 # Apache-2.0 taskflow>=3.8.0 # Apache-2.0 WebOb>=1.8.5 # MIT WSME>=0.9.2 # MIT networkx>=2.4 # BSD microversion_parse>=0.2.1 # Apache-2.0 futurist>=1.8.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6591353 python_watcher-14.0.0/setup.cfg0000664000175000017500000001302100000000000016473 0ustar00zuulzuul00000000000000[metadata] name = python-watcher summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. 
description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/watcher/latest/ python_requires = >=3.9 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 [files] packages = watcher data_files = etc/ = etc/* [entry_points] oslo.config.opts = watcher = watcher.conf.opts:list_opts oslo.policy.policies = watcher = watcher.common.policies:list_rules oslo.policy.enforcer = watcher = watcher.common.policy:get_enforcer console_scripts = watcher-api = watcher.cmd.api:main watcher-db-manage = watcher.cmd.dbmanage:main watcher-decision-engine = watcher.cmd.decisionengine:main watcher-applier = watcher.cmd.applier:main watcher-sync = watcher.cmd.sync:main watcher-status = watcher.cmd.status:main wsgi_scripts = watcher-api-wsgi = watcher.api.wsgi:initialize_wsgi_app watcher.database.migration_backend = sqlalchemy = watcher.db.sqlalchemy.migration watcher_goals = unclassified = watcher.decision_engine.goal.goals:Unclassified dummy = watcher.decision_engine.goal.goals:Dummy server_consolidation = watcher.decision_engine.goal.goals:ServerConsolidation thermal_optimization = watcher.decision_engine.goal.goals:ThermalOptimization workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization saving_energy = watcher.decision_engine.goal.goals:SavingEnergy hardware_maintenance = watcher.decision_engine.goal.goals:HardwareMaintenance cluster_maintaining = watcher.decision_engine.goal.goals:ClusterMaintaining watcher_scoring_engines = dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer watcher_scoring_engine_containers = dummy_scoring_container = watcher.decision_engine.scoring.dummy_scoring_container:DummyScoringContainer watcher_strategies = dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize actuator = watcher.decision_engine.strategy.strategies.actuation:Actuator basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl saving_energy = watcher.decision_engine.strategy.strategies.saving_energy:SavingEnergy vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation workload_stabilization = watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor 
storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance node_resource_consolidation = watcher.decision_engine.strategy.strategies.node_resource_consolidation:NodeResourceConsolidation watcher_actions = migrate = watcher.applier.actions.migration:Migrate nop = watcher.applier.actions.nop:Nop sleep = watcher.applier.actions.sleep:Sleep change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState resize = watcher.applier.actions.resize:Resize change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState volume_migrate = watcher.applier.actions.volume_migration:VolumeMigrate watcher_workflow_engines = taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine watcher_planners = weight = watcher.decision_engine.planner.weight:WeightPlanner workload_stabilization = watcher.decision_engine.planner.workload_stabilization:WorkloadStabilizationPlanner node_resource_consolidation = watcher.decision_engine.planner.node_resource_consolidation:NodeResourceConsolidationPlanner watcher_cluster_data_model_collectors = compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector [codespell] skip = *.po,*.js,*.css,*.html,*.svg,HACKING.py,*hacking*,*build*,*_static*,doc/dictionary.txt,*.pyc,*.inv,*.gz,*.jpg,*.png,*.vsd,*.graffle,*.json count = quiet-level = 4 [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/setup.py0000664000175000017500000000127100000000000016370 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
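# NOTE: the setup() call below is intentionally minimal: with pbr=True,
# all package metadata, entry points and scripts are read from setup.cfg
# at build time instead of being passed as arguments here.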
import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/test-requirements.txt0000664000175000017500000000030700000000000021116 0ustar00zuulzuul00000000000000coverage>=4.5.1 # Apache-2.0 freezegun>=0.3.10 # Apache-2.0 oslotest>=3.3.0 # Apache-2.0 testscenarios>=0.5.0 # Apache-2.0/BSD testtools>=2.3.0 # MIT stestr>=2.0.0 # Apache-2.0 WebTest>=2.0.27 # MIT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/tox.ini0000664000175000017500000001015100000000000016166 0ustar00zuulzuul00000000000000[tox] minversion = 3.18.0 envlist = py3,pep8 ignore_basepython_conflict = True [testenv] basepython = python3 usedevelop = True allowlist_externals = find rm install_command = pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages} setenv = VIRTUAL_ENV={envdir} OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=30 PYTHONDONTWRITEBYTECODE=1 deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt python-libmaas>=0.6.8 commands = rm -f .testrepository/times.dbm find . -type f -name "*.py[c|o]" -delete stestr run {posargs} passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY OS_DEBUG # NOTE(sean-k-mooney) optimization is enabled by default and when enabled # asserts are compiled out. Disable optimization to allow asserts in # watcher to fire in unit and functional tests. This can be useful for # debugging issues with fixtures and mocks. PYTHONOPTIMIZE [testenv:pep8] description = Run style checks. skip_install = true deps = pre-commit commands = pre-commit run --all-files --show-diff-on-failure [testenv:venv] setenv = PYTHONHASHSEED=0 deps = -r{toxinidir}/doc/requirements.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = {posargs} [testenv:cover] setenv = PYTHON=coverage run --source watcher --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report [testenv:docs] setenv = PYTHONHASHSEED=0 deps = -r{toxinidir}/doc/requirements.txt commands = rm -fr doc/build doc/source/api/ .autogenerated sphinx-build -W --keep-going -b html doc/source doc/build/html [testenv:api-ref] deps = -r{toxinidir}/doc/requirements.txt allowlist_externals = bash commands = bash -c 'rm -rf api-ref/build' sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html [testenv:debug] commands = oslo_debug_helper -t watcher/tests {posargs} [testenv:genconfig] sitepackages = False commands = oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf [testenv:wheel] commands = python setup.py bdist_wheel [testenv:pdf-docs] deps = {[testenv:docs]deps} allowlist_externals = rm make commands = rm -rf doc/build/pdf sphinx-build -W --keep-going -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:releasenotes] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html [testenv:bandit] skip_install = true deps = {[testenv:pep8]deps} commands = pre-commit run --all-files --show-diff-on-failure bandit
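# NOTE: typical local invocations of the environments defined above,
# assuming tox >= 3.18 is installed: "tox -e pep8" runs the pre-commit
# style checks, "tox" (or "tox -e py3") runs the unit tests, and
# "tox -e genconfig" / "tox -e genpolicy" regenerate the sample
# configuration and policy files.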
[flake8] filename = *.py,app.wsgi show-source=True # W504 line break after binary operator ignore= H105,E123,E226,N320,H202,W504 builtins= _ enable-extensions = H106,H203,H904 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes [hacking] import_exceptions = watcher._i18n [flake8:local-plugins] extension = N319 = checks:no_translate_debug_logs N321 = checks:use_jsonutils N322 = checks:check_assert_called_once_with N325 = checks:check_python3_xrange N326 = checks:check_no_basestring N327 = checks:check_python3_no_iteritems N328 = checks:check_asserttrue N329 = checks:check_assertfalse N330 = checks:check_assertempty N331 = checks:check_assertisinstance N332 = checks:check_assertequal_for_httpcode N333 = checks:check_log_warn_deprecated N340 = checks:check_oslo_i18n_wrapper N341 = checks:check_builtins_gettext N342 = checks:no_redundant_import_alias N366 = checks:import_stock_mock paths = ./watcher/hacking [doc8] extension=.rst # todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed ignore-path=doc/source/image_src,doc/source/man,doc/source/api ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/0000775000175000017500000000000000000000000016312 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/__init__.py0000664000175000017500000000120000000000000020414 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version __version__ = pbr.version.VersionInfo('python-watcher').version_string() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/_i18n.py0000664000175000017500000000241700000000000017606 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import oslo_i18n from oslo_i18n import _lazy # The domain is the name of the App which is used to generate the folder # containing the translation files (i.e. 
the .pot file and the various locales) DOMAIN = "watcher" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def lazy_translation_enabled(): return _lazy.USE_LAZY def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/api/0000775000175000017500000000000000000000000017063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/__init__.py0000664000175000017500000000000000000000000021162 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/acl.py0000664000175000017500000000270200000000000020175 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # Copyright (c) 2016 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Access Control Lists (ACLs) control access to the API server.""" from watcher.api.middleware import auth_token from watcher import conf CONF = conf.CONF def install(app, conf, public_routes): """Install ACL check on application. :param app: A WSGI application. :param conf: Settings. Dict'ified and passed to keystonemiddleware. :param public_routes: The list of routes which can be accessed without authentication. :return: The same WSGI application with ACL installed. """ if not CONF.get('enable_authentication'): return app return auth_token.AuthTokenMiddleware(app, conf=dict(conf.keystone_authtoken), public_api_routes=public_routes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/app.py0000664000175000017500000000317400000000000020222 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright © 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # Copyright (c) 2016 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import pecan from watcher.api import acl from watcher.api import config as api_config from watcher.api.middleware import parsable_error from watcher import conf CONF = conf.CONF def get_pecan_config(): # Set up the pecan configuration return pecan.configuration.conf_from_dict(api_config.PECAN_CONFIG) def setup_app(config=None): if not config: config = get_pecan_config() app_conf = dict(config.app) app = pecan.make_app( app_conf.pop('root'), logging=getattr(config, 'logging', {}), debug=CONF.debug, wrap_app=parsable_error.ParsableErrorMiddleware, **app_conf ) return acl.install(app, CONF, config.app.acl_public_routes) class VersionSelectorApplication(object): def __init__(self): pc = get_pecan_config() self.v1 = setup_app(config=pc) def __call__(self, environ, start_response): return self.v1(environ, start_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/app.wsgi0000664000175000017500000000161200000000000020536 0ustar00zuulzuul00000000000000# -*- mode: python -*- # -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Use this file for deploying the API service under Apache2 mod_wsgi. """ # This script is deprecated and it will be removed in U release. # Please switch to automatically generated watcher-api-wsgi script instead. from watcher.api import wsgi application = wsgi.initialize_wsgi_app(show_deprecated=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/config.py0000664000175000017500000000313700000000000020706 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
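# NOTE: this module only assembles the Pecan settings consumed by
# watcher.api.app.setup_app(); webhook routes are added to the public
# (unauthenticated) routes below unless [api] enable_webhooks_auth is set.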
from oslo_config import cfg from watcher.api import hooks # Server Specific Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa server = { 'port': '9322', 'host': '127.0.0.1' } # Pecan Application Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa acl_public_routes = ['/'] if not cfg.CONF.api.get("enable_webhooks_auth"): acl_public_routes.append('/v1/webhooks/.*') app = { 'root': 'watcher.api.controllers.root.RootController', 'modules': ['watcher.api'], 'hooks': [ hooks.ContextHook(), hooks.NoExceptionTracebackHook(), ], 'static_root': '%(confdir)s/public', 'enable_acl': True, 'acl_public_routes': acl_public_routes, } # WSME Configurations # See https://wsme.readthedocs.org/en/latest/integrate.html#configuration wsme = { 'debug': cfg.CONF.get("debug") if "debug" in cfg.CONF else False, } PECAN_CONFIG = { "server": server, "app": app, "wsme": wsme, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/api/controllers/0000775000175000017500000000000000000000000021431 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/__init__.py0000664000175000017500000000000000000000000023530 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/base.py0000664000175000017500000001045500000000000022722 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import functools import microversion_parse from webob import exc import wsme from wsme import types as wtypes class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" deleted_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is deleted""" def as_dict(self): """Render this object as a dict of its fields.""" return dict((k, getattr(self, k)) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. 
""" if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) @functools.total_ordering class Version(object): """API Version object.""" string = 'OpenStack-API-Version' """HTTP Header string carrying the requested version""" min_string = 'OpenStack-API-Minimum-Version' """HTTP response header""" max_string = 'OpenStack-API-Maximum-Version' """HTTP response header""" def __init__(self, headers, default_version, latest_version): """Create an API Version object from the supplied headers. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :raises: webob.HTTPNotAcceptable """ (self.major, self.minor) = Version.parse_headers( headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_str = microversion_parse.get_version( headers, service_type='infra-optim') minimal_version = (1, 0) if version_str is None: # If requested header is wrong, Watcher answers with the minimal # supported version. return minimal_version if version_str.lower() == 'latest': parse_str = latest_version else: parse_str = version_str try: version = tuple(int(i) for i in parse_str.split('.')) except ValueError: version = minimal_version # NOTE (alexchadin): Old python-watcherclient sends requests with # value of version header is "1". It should be transformed to 1.0 as # it was supposed to be. if len(version) == 1 and version[0] == 1: version = minimal_version if len(version) != 2: raise exc.HTTPNotAcceptable( "Invalid value for %s header" % Version.string) return version def __gt__(self, other): return (self.major, self.minor) > (other.major, other.minor) def __eq__(self, other): return (self.major, self.minor) == (other.major, other.minor) def __ne__(self, other): return not self.__eq__(other) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/link.py0000664000175000017500000000376300000000000022751 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import pecan from wsme import types as wtypes from watcher.api.controllers import base def build_url(resource, resource_args, bookmark=False, base_url=None): if base_url is None: base_url = pecan.request.application_url template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' # FIXME(lucasagomes): I'm getting a 404 when doing a GET on # a nested resource when the URL ends with a '/'. # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' return template % {'url': base_url, 'res': resource, 'args': resource_args} class Link(base.APIBase): """A link representation.""" href = wtypes.text """The url of a link.""" rel = wtypes.text """The name of a link.""" type = wtypes.text """Indicates the type of document/link.""" @staticmethod def make_link(rel_name, url, resource, resource_args, bookmark=False, type=wtypes.Unset): href = build_url(resource, resource_args, bookmark=bookmark, base_url=url) return Link(href=href, rel=rel_name, type=type) @classmethod def sample(cls): sample = cls(href="http://localhost:6385/chassis/" "eaaca217-e7d8-47b4-bb41-3f99f20eed89", rel="bookmark") return sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/rest_api_version_history.rst0000664000175000017500000000200200000000000027311 0ustar00zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 1.0 (Initial version) ----------------------- This is the initial version of the Watcher API which supports microversions. A user can specify a header in the API request:: OpenStack-API-Version: infra-optim <version> where ``<version>`` is any valid API version for this API. If no version is specified then the API will behave as if version 1.0 was requested. 1.1 --- Added the ``start_time`` and ``end_time`` parameters to the create audit request, to support setting the start and end time of continuous audits. 1.2 --- Added ``force`` to the create audit request. If ``force`` is true, the audit will be executed despite an ongoing action plan. 1.3 --- Added the list data model API. 1.4 --- Added the Watcher webhook API. It can be used to trigger an audit with the ``event`` type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/root.py0000664000175000017500000000710200000000000022766 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
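# NOTE: the root controller below exposes the unversioned version document
# and, via _route(), redirects requests that omit a version number to the
# default v1 API.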
import importlib import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers import v1 class APIStatus(object): CURRENT = "CURRENT" SUPPORTED = "SUPPORTED" DEPRECATED = "DEPRECATED" EXPERIMENTAL = "EXPERIMENTAL" class Version(base.APIBase): """An API version representation.""" id = wtypes.text """The ID of the version, also acts as the release number""" status = wtypes.text """The state of this API version""" max_version = wtypes.text """The maximum version supported""" min_version = wtypes.text """The minimum version supported""" links = [link.Link] """A Link that points to a specific version of the API""" @staticmethod def convert(id, status=APIStatus.CURRENT): v = importlib.import_module('watcher.api.controllers.%s.versions' % id) version = Version() version.id = id version.status = status version.max_version = v.max_version_string() version.min_version = v.min_version_string() version.links = [link.Link.make_link('self', pecan.request.application_url, id, '', bookmark=True)] return version class Root(base.APIBase): name = wtypes.text """The name of the API""" description = wtypes.text """Some information about this API""" versions = [Version] """Links to all the versions available in this API""" default_version = Version """A link to the default version of the API""" @staticmethod def convert(): root = Root() root.name = "OpenStack Watcher API" root.description = ("Watcher is an OpenStack project which aims to " "improve physical resource usage through " "better VM placement.") root.versions = [Version.convert('v1')] root.default_version = Version.convert('v1') return root class RootController(rest.RestController): _versions = ['v1'] """All supported API versions""" _default_version = 'v1' """The default API version""" v1 = v1.Controller() @wsme_pecan.wsexpose(Root) def get(self): # NOTE: convert() is called for every request because we need to get # the host url from the request object to make the links. return Root.convert() @pecan.expose() def _route(self, args): """Overrides the default routing behavior. It redirects the request to the default version of the watcher API if the version number is not specified in the URL. """ if args[0] and args[0] not in self._versions: args = [self._default_version] + args return super(RootController, self)._route(args) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/api/controllers/v1/0000775000175000017500000000000000000000000021757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/__init__.py0000664000175000017500000002525600000000000024100 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
""" Version 1 of the Watcher API NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. """ import datetime import pecan from pecan import rest from webob import exc import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import action from watcher.api.controllers.v1 import action_plan from watcher.api.controllers.v1 import audit from watcher.api.controllers.v1 import audit_template from watcher.api.controllers.v1 import data_model from watcher.api.controllers.v1 import goal from watcher.api.controllers.v1 import scoring_engine from watcher.api.controllers.v1 import service from watcher.api.controllers.v1 import strategy from watcher.api.controllers.v1 import utils from watcher.api.controllers.v1 import versions from watcher.api.controllers.v1 import webhooks def min_version(): return base.Version( {base.Version.string: ' '.join([versions.service_type_string(), versions.min_version_string()])}, versions.min_version_string(), versions.max_version_string()) def max_version(): return base.Version( {base.Version.string: ' '.join([versions.service_type_string(), versions.max_version_string()])}, versions.min_version_string(), versions.max_version_string()) class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" deleted_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is deleted""" def as_dict(self): """Render this object as a dict of its fields.""" return dict((k, getattr(self, k)) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. 
""" if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) class MediaType(APIBase): """A media type representation.""" base = wtypes.text type = wtypes.text def __init__(self, base, type): self.base = base self.type = type class V1(APIBase): """The representation of the version 1 of the API.""" id = wtypes.text """The ID of the version, also acts as the release number""" media_types = [MediaType] """An array of supcontainersed media types for this version""" audit_templates = [link.Link] """Links to the audit templates resource""" audits = [link.Link] """Links to the audits resource""" data_model = [link.Link] """Links to the data model resource""" actions = [link.Link] """Links to the actions resource""" action_plans = [link.Link] """Links to the action plans resource""" scoring_engines = [link.Link] """Links to the Scoring Engines resource""" services = [link.Link] """Links to the services resource""" webhooks = [link.Link] """Links to the webhooks resource""" links = [link.Link] """Links that point to a specific URL for this version and documentation""" @staticmethod def convert(): v1 = V1() v1.id = "v1" base_url = pecan.request.application_url v1.links = [link.Link.make_link('self', base_url, 'v1', '', bookmark=True), link.Link.make_link('describedby', 'http://docs.openstack.org', 'developer/watcher/dev', 'api-spec-v1.html', bookmark=True, type='text/html') ] v1.media_types = [MediaType('application/json', 'application/vnd.openstack.watcher.v1+json')] v1.audit_templates = [link.Link.make_link('self', base_url, 'audit_templates', ''), link.Link.make_link('bookmark', base_url, 'audit_templates', '', bookmark=True) ] v1.audits = [link.Link.make_link('self', base_url, 'audits', ''), link.Link.make_link('bookmark', base_url, 'audits', '', bookmark=True) ] if utils.allow_list_datamodel(): v1.data_model = [link.Link.make_link('self', base_url, 'data_model', ''), link.Link.make_link('bookmark', base_url, 'data_model', '', bookmark=True) ] v1.actions = [link.Link.make_link('self', base_url, 'actions', ''), link.Link.make_link('bookmark', base_url, 'actions', '', bookmark=True) ] v1.action_plans = [link.Link.make_link( 'self', base_url, 'action_plans', ''), link.Link.make_link('bookmark', base_url, 'action_plans', '', bookmark=True) ] v1.scoring_engines = [link.Link.make_link( 'self', base_url, 'scoring_engines', ''), link.Link.make_link('bookmark', base_url, 'scoring_engines', '', bookmark=True) ] v1.services = [link.Link.make_link( 'self', base_url, 'services', ''), link.Link.make_link('bookmark', base_url, 'services', '', bookmark=True) ] if utils.allow_webhook_api(): v1.webhooks = [link.Link.make_link( 'self', base_url, 'webhooks', ''), link.Link.make_link('bookmark', base_url, 'webhooks', '', bookmark=True) ] return v1 class Controller(rest.RestController): """Version 1 API controller root.""" audits = audit.AuditsController() audit_templates = audit_template.AuditTemplatesController() actions = action.ActionsController() action_plans = action_plan.ActionPlansController() goals = goal.GoalsController() scoring_engines = scoring_engine.ScoringEngineController() services = service.ServicesController() strategies = strategy.StrategiesController() data_model = data_model.DataModelController() webhooks = webhooks.WebhookController() @wsme_pecan.wsexpose(V1) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. 
return V1.convert() def _check_version(self, version, headers=None): if headers is None: headers = {} # ensure that major version in the URL matches the header if version.major != versions.BASE_VERSION: raise exc.HTTPNotAcceptable( "Mutually exclusive versions requested. Version %(ver)s " "requested but not supported by this service. The supported " "version range is: [%(min)s, %(max)s]." % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) # ensure the minor version is within the supported range if version < min_version() or version > max_version(): raise exc.HTTPNotAcceptable( "Version %(ver)s was requested but the minor version is not " "supported by this service. The supported version range is: " "[%(min)s, %(max)s]." % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) @pecan.expose() def _route(self, args, request=None): v = base.Version(pecan.request.headers, versions.min_version_string(), versions.max_version_string()) # The Vary header is used as a hint to caching proxies and user agents # that the response is also dependent on the OpenStack-API-Version and # not just the body and query parameters. See RFC 7231 for details. pecan.response.headers['Vary'] = base.Version.string # Always set the min and max headers pecan.response.headers[base.Version.min_string] = ( versions.min_version_string()) pecan.response.headers[base.Version.max_string] = ( versions.max_version_string()) # assert that requested version is supported self._check_version(v, pecan.response.headers) pecan.response.headers[base.Version.string] = ( ' '.join([versions.service_type_string(), str(v)])) pecan.request.version = v return super(Controller, self)._route(args, request) __all__ = ("Controller", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/action.py0000664000175000017500000004044600000000000023614 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action ` is what enables Watcher to transform the current state of a :ref:`Cluster ` after an :ref:`Audit `. An :ref:`Action ` is an atomic task which changes the current state of a target :ref:`Managed resource ` of the OpenStack :ref:`Cluster ` such as: - Live migration of an instance from one compute node to another compute node with Nova - Changing the power level of a compute node (ACPI level, ...) - Changing the current state of a compute node (enable or disable) with Nova In most cases, an :ref:`Action ` triggers some concrete commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). An :ref:`Action ` has a life-cycle and its current state may be one of the following: - **PENDING** : the :ref:`Action ` has not been executed yet by the :ref:`Watcher Applier ` - **ONGOING** : the :ref:`Action ` is currently being processed by the :ref:`Watcher Applier ` - **SUCCEEDED** : the :ref:`Action ` has been executed successfully - **FAILED** : an error occurred while trying to execute the :ref:`Action ` - **DELETED** : the :ref:`Action ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Action ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` :ref:`Some default implementations are provided `, but it is possible to :ref:`develop new implementations ` which are dynamically loaded by Watcher at launch time. """ from http import HTTPStatus from oslo_utils import timeutils import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions in which these fields were introduced. """ pass class ActionPatchType(types.JsonPatchType): @staticmethod def mandatory_attrs(): return [] class Action(base.APIBase): """API representation of an action. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an action. """ _action_plan_uuid = None def _get_action_plan_uuid(self): return self._action_plan_uuid def _set_action_plan_uuid(self, value): if value == wtypes.Unset: self._action_plan_uuid = wtypes.Unset elif value and self._action_plan_uuid != value: try: action_plan = objects.ActionPlan.get( pecan.request.context, value) self._action_plan_uuid = action_plan.uuid self.action_plan_id = action_plan.id except exception.ActionPlanNotFound: self._action_plan_uuid = None uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for this action""" action_plan_uuid = wtypes.wsproperty(types.uuid, _get_action_plan_uuid, _set_action_plan_uuid, mandatory=True) """The action plan this action belongs to """ state = wtypes.text """This action state""" action_type = wtypes.text """Action type""" description = wtypes.text """Action description""" input_parameters = types.jsontype """One or more key/value pairs """ parents = wtypes.wsattr(types.jsontype, readonly=True) """UUIDs of parent actions""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" def __init__(self, **kwargs): super(Action, self).__init__() self.fields = [] fields = list(objects.Action.fields) fields.append('action_plan_uuid') for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('action_plan_id') self.fields.append('description') setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id', wtypes.Unset)) @staticmethod def _convert_with_links(action, url, expand=True): if not expand: action.unset_fields_except(['uuid', 'state', 'action_plan_uuid', 'action_plan_id', 'action_type', 'parents']) action.links = [link.Link.make_link('self', url, 'actions', action.uuid), link.Link.make_link('bookmark', url, 'actions', action.uuid, bookmark=True) ] return action @classmethod def convert_with_links(cls, action, expand=True): action = Action(**action.as_dict()) try: obj_action_desc = objects.ActionDescription.get_by_type( pecan.request.context, action.action_type) description = obj_action_desc.description except exception.ActionDescriptionNotFound: description = "" setattr(action, 'description', description) hide_fields_in_newer_versions(action) return cls._convert_with_links(action, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', description='action description', state='PENDING', created_at=timeutils.utcnow(), deleted_at=None, updated_at=timeutils.utcnow(), parents=[]) sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionCollection(collection.Collection): """API representation of a collection of actions.""" actions = [Action] """A list containing action objects""" def __init__(self, **kwargs): self._type = 'actions' @staticmethod def convert_with_links(actions, limit, url=None, expand=False, **kwargs): collection = ActionCollection() collection.actions = [Action.convert_with_links(p, expand) for p in actions] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.actions = [Action.sample(expand=False)] return sample class ActionsController(rest.RestController): """REST controller for Actions.""" def __init__(self): super(ActionsController, self).__init__() from_actions = False """A flag to indicate if the requests to this controller are coming from the top-level resource Actions.""" _custom_actions = { 'detail': ['GET'], } def _get_actions_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, action_plan_uuid=None, audit_uuid=None): additional_fields = ['action_plan_uuid'] api_utils.validate_sort_key(sort_key, list(objects.Action.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Action.get_by_uuid(pecan.request.context, marker) filters = {} if action_plan_uuid: filters['action_plan_uuid'] = action_plan_uuid if audit_uuid: filters['audit_uuid'] = audit_uuid need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) actions = objects.Action.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) actions_collection = ActionCollection.convert_with_links( actions, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(actions_collection.actions, sort_key, sort_dir) return actions_collection @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, wtypes.text,
An :ref:`Action ` has a life-cycle and its current state may be one of the
following:

-  **PENDING** : the :ref:`Action ` has not been executed yet by the
   :ref:`Watcher Applier `
-  **ONGOING** : the :ref:`Action ` is currently being processed by the
   :ref:`Watcher Applier `
-  **SUCCEEDED** : the :ref:`Action ` has been executed successfully
-  **FAILED** : an error occurred while trying to execute the
   :ref:`Action `
-  **DELETED** : the :ref:`Action ` is still stored in the
   :ref:`Watcher database ` but is not returned any more through the
   Watcher APIs.
-  **CANCELLED** : the :ref:`Action ` was in **PENDING** or **ONGOING**
   state and was cancelled by the :ref:`Administrator `

:ref:`Some default implementations are provided `, but it is possible to
:ref:`develop new implementations ` which are dynamically loaded by Watcher
at launch time.
"""

from http import HTTPStatus

from oslo_utils import timeutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import exception
from watcher.common import policy
from watcher import objects


def hide_fields_in_newer_versions(obj):
    """This method hides fields that were added in newer API versions.

    Certain node fields were introduced at certain API versions.
    These fields are only made available when the request's API version
    matches or exceeds the versions when these fields were introduced.
    """
    pass


class ActionPatchType(types.JsonPatchType):

    @staticmethod
    def mandatory_attrs():
        return []


class Action(base.APIBase):
    """API representation of an action.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    action.
    """

    _action_plan_uuid = None

    def _get_action_plan_uuid(self):
        return self._action_plan_uuid

    def _set_action_plan_uuid(self, value):
        if value == wtypes.Unset:
            self._action_plan_uuid = wtypes.Unset
        elif value and self._action_plan_uuid != value:
            try:
                action_plan = objects.ActionPlan.get(
                    pecan.request.context, value)
                self._action_plan_uuid = action_plan.uuid
                self.action_plan_id = action_plan.id
            except exception.ActionPlanNotFound:
                self._action_plan_uuid = None

    uuid = wtypes.wsattr(types.uuid, readonly=True)
    """Unique UUID for this action"""

    action_plan_uuid = wtypes.wsproperty(types.uuid, _get_action_plan_uuid,
                                         _set_action_plan_uuid,
                                         mandatory=True)
    """The action plan this action belongs to """

    state = wtypes.text
    """This action state"""

    action_type = wtypes.text
    """Action type"""

    description = wtypes.text
    """Action description"""

    input_parameters = types.jsontype
    """One or more key/value pairs """

    parents = wtypes.wsattr(types.jsontype, readonly=True)
    """UUIDs of parent actions"""

    links = wtypes.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated action links"""

    def __init__(self, **kwargs):
        super(Action, self).__init__()
        self.fields = []
        fields = list(objects.Action.fields)
        fields.append('action_plan_uuid')
        for field in fields:
            # Skip fields we do not expose.
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('action_plan_id') self.fields.append('description') setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id', wtypes.Unset)) @staticmethod def _convert_with_links(action, url, expand=True): if not expand: action.unset_fields_except(['uuid', 'state', 'action_plan_uuid', 'action_plan_id', 'action_type', 'parents']) action.links = [link.Link.make_link('self', url, 'actions', action.uuid), link.Link.make_link('bookmark', url, 'actions', action.uuid, bookmark=True) ] return action @classmethod def convert_with_links(cls, action, expand=True): action = Action(**action.as_dict()) try: obj_action_desc = objects.ActionDescription.get_by_type( pecan.request.context, action.action_type) description = obj_action_desc.description except exception.ActionDescriptionNotFound: description = "" setattr(action, 'description', description) hide_fields_in_newer_versions(action) return cls._convert_with_links(action, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', description='action description', state='PENDING', created_at=timeutils.utcnow(), deleted_at=None, updated_at=timeutils.utcnow(), parents=[]) sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionCollection(collection.Collection): """API representation of a collection of actions.""" actions = [Action] """A list containing actions objects""" def __init__(self, **kwargs): self._type = 'actions' @staticmethod def convert_with_links(actions, limit, url=None, expand=False, **kwargs): collection = ActionCollection() collection.actions = [Action.convert_with_links(p, expand) for p in actions] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.actions = [Action.sample(expand=False)] return sample class ActionsController(rest.RestController): """REST controller for Actions.""" def __init__(self): super(ActionsController, self).__init__() from_actions = False """A flag to indicate if the requests to this controller are coming from the top-level resource Actions.""" _custom_actions = { 'detail': ['GET'], } def _get_actions_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, action_plan_uuid=None, audit_uuid=None): additional_fields = ['action_plan_uuid'] api_utils.validate_sort_key(sort_key, list(objects.Action.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Action.get_by_uuid(pecan.request.context, marker) filters = {} if action_plan_uuid: filters['action_plan_uuid'] = action_plan_uuid if audit_uuid: filters['audit_uuid'] = audit_uuid need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) actions = objects.Action.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) actions_collection = ActionCollection.convert_with_links( actions, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(actions_collection.actions, sort_key, sort_dir) return actions_collection @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, wtypes.text, 
                         wtypes.text, types.uuid, types.uuid)
    def get_all(self, marker=None, limit=None, sort_key='id',
                sort_dir='asc', action_plan_uuid=None, audit_uuid=None):
        """Retrieve a list of actions.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param action_plan_uuid: Optional UUID of an action plan, to get
                                 only actions for that action plan.
        :param audit_uuid: Optional UUID of an audit, to get only actions
                           for that audit.
        """
        context = pecan.request.context
        policy.enforce(context, 'action:get_all',
                       action='action:get_all')

        if action_plan_uuid and audit_uuid:
            raise exception.ActionFilterCombinationProhibited

        return self._get_actions_collection(
            marker, limit, sort_key, sort_dir,
            action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid)

    @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, wtypes.text,
                         wtypes.text, types.uuid, types.uuid)
    def detail(self, marker=None, limit=None, sort_key='id',
               sort_dir='asc', action_plan_uuid=None, audit_uuid=None):
        """Retrieve a list of actions with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param action_plan_uuid: Optional UUID of an action plan, to get
                                 only actions for that action plan.
        :param audit_uuid: Optional UUID of an audit, to get only actions
                           for that audit.
        """
        context = pecan.request.context
        policy.enforce(context, 'action:detail',
                       action='action:detail')
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "actions":
            raise exception.HTTPNotFound

        if action_plan_uuid and audit_uuid:
            raise exception.ActionFilterCombinationProhibited

        expand = True
        resource_url = '/'.join(['actions', 'detail'])
        return self._get_actions_collection(
            marker, limit, sort_key, sort_dir, expand, resource_url,
            action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid)

    @wsme_pecan.wsexpose(Action, types.uuid)
    def get_one(self, action_uuid):
        """Retrieve information about the given action.

        :param action_uuid: UUID of an action.
        """
        if self.from_actions:
            raise exception.OperationNotPermitted

        context = pecan.request.context
        action = api_utils.get_resource('Action', action_uuid)
        policy.enforce(context, 'action:get', action, action='action:get')

        return Action.convert_with_links(action)

    @wsme_pecan.wsexpose(Action, body=Action,
                         status_code=HTTPStatus.CREATED)
    def post(self, action):
        """Create a new action (forbidden).

        :param action: an action within the request body.
        """
        # FIXME: blueprint edit-action-plan-flow
        raise exception.OperationNotPermitted(
            _("Cannot create an action directly"))

        if self.from_actions:
            raise exception.OperationNotPermitted

        action_dict = action.as_dict()
        context = pecan.request.context
        new_action = objects.Action(context, **action_dict)
        new_action.create()

        # Set the HTTP Location Header
        pecan.response.location = link.build_url('actions', new_action.uuid)
        return Action.convert_with_links(new_action)

    @wsme.validate(types.uuid, [ActionPatchType])
    @wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType])
    def patch(self, action_uuid, patch):
        """Update an existing action (forbidden).

        :param action_uuid: UUID of an action.
        :param patch: a json PATCH document to apply to this action.
""" # FIXME: blueprint edit-action-plan-flow raise exception.OperationNotPermitted( _("Cannot modify an action directly")) if self.from_actions: raise exception.OperationNotPermitted action_to_update = objects.Action.get_by_uuid(pecan.request.context, action_uuid) try: action_dict = action_to_update.as_dict() action = Action(**api_utils.apply_jsonpatch(action_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Action.fields: try: patch_val = getattr(action, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if action_to_update[field] != patch_val: action_to_update[field] = patch_val action_to_update.save() return Action.convert_with_links(action_to_update) @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT) def delete(self, action_uuid): """Delete a action(forbidden). :param action_uuid: UUID of a action. """ # FIXME: blueprint edit-action-plan-flow raise exception.OperationNotPermitted( _("Cannot delete an action directly")) action_to_delete = objects.Action.get_by_uuid( pecan.request.context, action_uuid) action_to_delete.soft_delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/action_plan.py0000664000175000017500000005644300000000000024634 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action Plan ` specifies a flow of :ref:`Actions ` that should be executed in order to satisfy a given :ref:`Goal `. It also contains an estimated :ref:`global efficacy ` alongside a set of :ref:`efficacy indicators `. An :ref:`Action Plan ` is generated by Watcher when an :ref:`Audit ` is successful which implies that the :ref:`Strategy ` which was used has found a :ref:`Solution ` to achieve the :ref:`Goal ` of this :ref:`Audit `. In the default implementation of Watcher, an action plan is composed of a list of successive :ref:`Actions ` (i.e., a Workflow of :ref:`Actions ` belonging to a unique branch). However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) ` composed of two types of Action Item(s): - simple :ref:`Actions `: atomic tasks, which means it can not be split into smaller tasks or commands from an OpenStack point of view. - composite Actions: which are composed of several simple :ref:`Actions ` ordered in sequential and/or parallel flows. An :ref:`Action Plan ` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) `_ or `Unified Modeling Language (UML) `_. To see the life-cycle and description of :ref:`Action Plan ` states, visit :ref:`the Action Plan state machine `. 
""" from http import HTTPStatus from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.applier import rpcapi from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher import objects from watcher.objects import action_plan as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class ActionPlanPatchType(types.JsonPatchType): @staticmethod def _validate_state(patch): serialized_patch = {'path': patch.path, 'op': patch.op} if patch.value is not wtypes.Unset: serialized_patch['value'] = patch.value # todo: use state machines to handle state transitions state_value = patch.value if state_value and not hasattr(ap_objects.State, state_value): msg = _("Invalid state: %(state)s") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(state=state_value)) @staticmethod def validate(patch): if patch.path == "/state": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs(): return ["audit_id", "state"] class ActionPlan(base.APIBase): """API representation of a action plan. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an action plan. 
""" _audit_uuid = None _strategy_uuid = None _strategy_name = None _efficacy_indicators = None def _get_audit_uuid(self): return self._audit_uuid def _set_audit_uuid(self, value): if value == wtypes.Unset: self._audit_uuid = wtypes.Unset elif value and self._audit_uuid != value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid = audit.uuid self.audit_id = audit.id except exception.AuditNotFound: self._audit_uuid = None def _get_efficacy_indicators(self): if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self, value): efficacy_indicators = [] if value == wtypes.Unset and not self._efficacy_indicators: try: _efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={"action_plan_uuid": self.uuid}) for indicator in _efficacy_indicators: efficacy_indicator = efficacyindicator.EfficacyIndicator( context=pecan.request.context, name=indicator.name, description=indicator.description, unit=indicator.unit, value=float(indicator.value), ) efficacy_indicators.append(efficacy_indicator.as_dict()) self._efficacy_indicators = efficacy_indicators except exception.EfficacyIndicatorNotFound as exc: LOG.exception(exc) elif value and self._efficacy_indicators != value: self._efficacy_indicators = value def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for this action plan""" audit_uuid = wtypes.wsproperty(types.uuid, _get_audit_uuid, _set_audit_uuid, mandatory=True) """The UUID of the audit this port belongs to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the action plan refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this action plan refers to""" efficacy_indicators = wtypes.wsproperty( types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, mandatory=True) """The list of efficacy indicators associated to this action plan""" global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) """The global efficacy of this action plan""" state = wtypes.text """This action plan state""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" hostname = wtypes.wsattr(wtypes.text, mandatory=False) """Hostname the actionplan is running on""" def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = [] fields = list(objects.ActionPlan.fields) for field in fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('audit_uuid') self.fields.append('efficacy_indicators') setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(action_plan, url, expand=True): if not expand: action_plan.unset_fields_except( ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', 'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) action_plan.links = [ link.Link.make_link( 'self', url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark', url, 'action_plans', action_plan.uuid, bookmark=True)] return action_plan @classmethod def convert_with_links(cls, rpc_action_plan, expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=timeutils.utcnow(), deleted_at=None, updated_at=timeutils.utcnow()) sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' sample._efficacy_indicators = [{'description': 'Test indicator', 'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy = {'description': 'Global efficacy', 'name': 'test_global_efficacy', 'unit': '%'} return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): """API representation of a collection of action_plans.""" action_plans = [ActionPlan] """A list containing action_plans objects""" def __init__(self, **kwargs): self._type = 'action_plans' @staticmethod def convert_with_links(rpc_action_plans, limit, url=None, expand=False, **kwargs): ap_collection = ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for p in rpc_action_plans] ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) return ap_collection @classmethod def sample(cls): sample = cls() sample.action_plans = [ActionPlan.sample(expand=False)] return sample class ActionPlansController(rest.RestController): """REST controller for Actions.""" def __init__(self): super(ActionPlansController, self).__init__() self.applier_client = rpcapi.ApplierAPI() from_actionsPlans = False """A flag to indicate if the requests to this controller are coming from the top-level resource ActionPlan.""" _custom_actions = { 'start': ['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, audit_uuid=None, strategy=None): additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {} if audit_uuid: filters['audit_uuid'] = audit_uuid if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else 
                       None)
        action_plans = objects.ActionPlan.list(
            pecan.request.context,
            limit,
            marker_obj, sort_key=sort_db_key,
            sort_dir=sort_dir, filters=filters)

        action_plans_collection = ActionPlanCollection.convert_with_links(
            action_plans, limit, url=resource_url, expand=expand,
            sort_key=sort_key, sort_dir=sort_dir)

        if need_api_sort:
            api_utils.make_api_sort(action_plans_collection.action_plans,
                                    sort_key, sort_dir)

        return action_plans_collection

    @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
                         wtypes.text, types.uuid, wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id',
                sort_dir='asc', audit_uuid=None, strategy=None):
        """Retrieve a list of action plans.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_uuid: Optional UUID of an audit, to get only actions
                           for that audit.
        :param strategy: strategy UUID or name to filter by
        """
        context = pecan.request.context
        policy.enforce(context, 'action_plan:get_all',
                       action='action_plan:get_all')

        return self._get_action_plans_collection(
            marker, limit, sort_key, sort_dir,
            audit_uuid=audit_uuid, strategy=strategy)

    @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
                         wtypes.text, types.uuid, wtypes.text)
    def detail(self, marker=None, limit=None, sort_key='id',
               sort_dir='asc', audit_uuid=None, strategy=None):
        """Retrieve a list of action_plans with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_uuid: Optional UUID of an audit, to get only actions
                           for that audit.
        :param strategy: strategy UUID or name to filter by
        """
        context = pecan.request.context
        policy.enforce(context, 'action_plan:detail',
                       action='action_plan:detail')

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "action_plans":
            raise exception.HTTPNotFound
        expand = True
        resource_url = '/'.join(['action_plans', 'detail'])
        return self._get_action_plans_collection(
            marker, limit, sort_key, sort_dir, expand, resource_url,
            audit_uuid=audit_uuid, strategy=strategy)

    @wsme_pecan.wsexpose(ActionPlan, types.uuid)
    def get_one(self, action_plan_uuid):
        """Retrieve information about the given action plan.

        :param action_plan_uuid: UUID of an action plan.
        """
        if self.from_actionsPlans:
            raise exception.OperationNotPermitted

        context = pecan.request.context
        action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid)
        policy.enforce(
            context, 'action_plan:get', action_plan,
            action='action_plan:get')

        return ActionPlan.convert_with_links(action_plan)

    @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT)
    def delete(self, action_plan_uuid):
        """Delete an action plan.

        :param action_plan_uuid: UUID of an action plan.
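
        An action plan can only be deleted once it has reached a terminal
        state (see the ``allowed_states`` tuple in the method body below);
        the deletion itself is a soft delete. A minimal sketch, assuming the
        ``requests`` library and placeholder endpoint and token values::

            import requests

            # Placeholder values; soft-deletes the action plan server-side.
            requests.delete(
                "http://controller:9322/v1/action_plans/<action plan UUID>",
                headers={"X-Auth-Token": "<keystone token>"})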
""" context = pecan.request.context action_plan = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:delete', action_plan, action='action_plan:delete') allowed_states = (ap_objects.State.SUCCEEDED, ap_objects.State.RECOMMENDED, ap_objects.State.FAILED, ap_objects.State.SUPERSEDED, ap_objects.State.CANCELLED) if action_plan.state not in allowed_states: raise exception.DeleteError( state=action_plan.state) action_plan.soft_delete() @wsme.validate(types.uuid, [ActionPlanPatchType]) @wsme_pecan.wsexpose(ActionPlan, types.uuid, body=[ActionPlanPatchType]) def patch(self, action_plan_uuid, patch): """Update an existing action plan. :param action_plan_uuid: UUID of a action plan. :param patch: a json PATCH document to apply to this action plan. """ if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan_to_update = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:update', action_plan_to_update, action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan = False # transitions that are allowed via PATCH allowed_patch_transitions = [ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED), ] # todo: improve this in blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if transition not in allowed_patch_transitions: error_message = _("State transition not allowed: " "(%(initial_state)s -> %(new_state)s)") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=action_plan_to_update.state, new_state=action_plan.state)) if action_plan.state == ap_objects.State.PENDING: launch_action_plan = True if action_plan.state == ap_objects.State.CANCELLED: cancel_action_plan = True # Update only the fields that have changed for field in objects.ActionPlan.fields: try: patch_val = getattr(action_plan, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if (field == 'state' and patch_val == objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save() # NOTE: if action plan is cancelled from pending or recommended # state update action state here only if cancel_action_plan: filters = {'action_plan_uuid': action_plan.uuid} actions = objects.Action.list(pecan.request.context, filters=filters, eager=True) for a in actions: a.state = objects.action.State.CANCELLED a.save() if launch_action_plan: self.applier_client.launch_action_plan(pecan.request.context, action_plan.uuid) action_plan_to_update = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self, action_plan_uuid, **kwargs): """Start an action_plan :param action_plan_uuid: UUID of an action_plan. 
""" action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) context = pecan.request.context policy.enforce(context, 'action_plan:start', action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \ objects.action_plan.State.RECOMMENDED: raise exception.StartError( state=action_plan_to_start.state) action_plan_to_start['state'] = objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid) action_plan_to_start = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_start) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/audit.py0000664000175000017500000006656400000000000023460 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ In the Watcher system, an :ref:`Audit ` is a request for optimizing a :ref:`Cluster `. The optimization is done in order to satisfy one :ref:`Goal ` on a given :ref:`Cluster `. For each :ref:`Audit `, the Watcher system generates an :ref:`Action Plan `. To see the life-cycle and description of an :ref:`Audit ` states, visit :ref:`the Audit State machine `. """ import datetime from dateutil import tz from http import HTTPStatus from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest import wsme from wsme import types as wtypes from wsme import utils as wutils import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher.decision_engine import rpcapi from watcher import objects LOG = log.getLogger(__name__) def _get_object_by_value(context, class_name, value): if utils.is_uuid_like(value) or utils.is_int_like(value): return class_name.get(context, value) else: return class_name.get_by_name(context, value) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. 
""" if not api_utils.allow_start_end_audit_time(): obj.start_time = wtypes.Unset obj.end_time = wtypes.Unset if not api_utils.allow_force(): obj.force = wtypes.Unset class AuditPostType(wtypes.Base): name = wtypes.wsattr(wtypes.text, mandatory=False) audit_template_uuid = wtypes.wsattr(types.uuid, mandatory=False) goal = wtypes.wsattr(wtypes.text, mandatory=False) strategy = wtypes.wsattr(wtypes.text, mandatory=False) audit_type = wtypes.wsattr(wtypes.text, mandatory=True) state = wtypes.wsattr(wtypes.text, readonly=True, default=objects.audit.State.PENDING) parameters = wtypes.wsattr({wtypes.text: types.jsontype}, mandatory=False, default={}) interval = wtypes.wsattr(types.interval_or_cron, mandatory=False) scope = wtypes.wsattr(types.jsontype, readonly=True) auto_trigger = wtypes.wsattr(bool, mandatory=False) hostname = wtypes.wsattr(wtypes.text, readonly=True, mandatory=False) start_time = wtypes.wsattr(datetime.datetime, mandatory=False) end_time = wtypes.wsattr(datetime.datetime, mandatory=False) force = wtypes.wsattr(bool, mandatory=False) def as_audit(self, context): audit_type_values = [val.value for val in objects.audit.AuditType] if self.audit_type not in audit_type_values: raise exception.AuditTypeNotFound(audit_type=self.audit_type) if (self.audit_type == objects.audit.AuditType.ONESHOT.value and self.interval not in (wtypes.Unset, None)): raise exception.AuditIntervalNotAllowed(audit_type=self.audit_type) if (self.audit_type == objects.audit.AuditType.CONTINUOUS.value and self.interval in (wtypes.Unset, None)): raise exception.AuditIntervalNotSpecified( audit_type=self.audit_type) if self.audit_template_uuid and self.goal: raise exception.Invalid('Either audit_template_uuid ' 'or goal should be provided.') if (self.audit_type == objects.audit.AuditType.ONESHOT.value and (self.start_time not in (wtypes.Unset, None) or self.end_time not in (wtypes.Unset, None))): raise exception.AuditStartEndTimeNotAllowed( audit_type=self.audit_type) if not api_utils.allow_start_end_audit_time(): for field in ('start_time', 'end_time'): if getattr(self, field) not in (wtypes.Unset, None): raise exception.NotAcceptable() # If audit_template_uuid was provided, we will provide any # variables not included in the request, but not override # those variables that were included. 
if self.audit_template_uuid: try: audit_template = objects.AuditTemplate.get( context, self.audit_template_uuid) except exception.AuditTemplateNotFound: raise exception.Invalid( message=_('The audit template UUID or name specified is ' 'invalid')) at2a = { 'goal': 'goal_id', 'strategy': 'strategy_id', 'scope': 'scope', } to_string_fields = set(['goal', 'strategy']) for k in at2a: if not getattr(self, k): try: at_attr = getattr(audit_template, at2a[k]) if at_attr and (k in to_string_fields): at_attr = str(at_attr) setattr(self, k, at_attr) except AttributeError: pass # Note: If audit name was not provided, used a default name if not self.name: if self.strategy: strategy = _get_object_by_value(context, objects.Strategy, self.strategy) self.name = "%s-%s" % (strategy.name, timeutils.utcnow().isoformat()) elif self.audit_template_uuid: audit_template = objects.AuditTemplate.get( context, self.audit_template_uuid) self.name = "%s-%s" % (audit_template.name, timeutils.utcnow().isoformat()) else: goal = _get_object_by_value(context, objects.Goal, self.goal) self.name = "%s-%s" % (goal.name, timeutils.utcnow().isoformat()) # No more than 63 characters if len(self.name) > 63: LOG.warning("Audit: %s length exceeds 63 characters", self.name) self.name = self.name[0:63] return Audit( name=self.name, audit_type=self.audit_type, parameters=self.parameters, goal_id=self.goal, strategy_id=self.strategy, interval=self.interval, scope=self.scope, auto_trigger=self.auto_trigger, start_time=self.start_time, end_time=self.end_time, force=self.force) class AuditPatchType(types.JsonPatchType): @staticmethod def mandatory_attrs(): return ['/audit_template_uuid', '/type'] @staticmethod def validate(patch): def is_new_state_none(p): return p.path == '/state' and p.op == 'replace' and p.value is None serialized_patch = {'path': patch.path, 'op': patch.op, 'value': patch.value} if (patch.path in AuditPatchType.mandatory_attrs() or is_new_state_none(patch)): msg = _("%(field)s can't be updated.") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(field=patch.path)) return types.JsonPatchType.validate(patch) class Audit(base.APIBase): """API representation of an audit. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an audit. 
""" _goal_uuid = None _goal_name = None _strategy_uuid = None _strategy_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): goal = objects.Goal.get( pecan.request.context, value) else: goal = objects.Goal.get_by_name( pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = types.uuid """Unique UUID for this audit""" name = wtypes.text """Name of this audit""" audit_type = wtypes.text """Type of this audit""" state = wtypes.text """This audit state""" goal_uuid = wtypes.wsproperty( wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """Goal UUID the audit refers to""" goal_name = wtypes.wsproperty( wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit refers to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the audit refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this audit refers to""" parameters = {wtypes.text: types.jsontype} """The strategy parameters for this audit""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit links""" interval = wtypes.wsattr(wtypes.text, mandatory=False) """Launch audit periodically (in seconds)""" scope = wtypes.wsattr(types.jsontype, mandatory=False) """Audit Scope""" auto_trigger = wtypes.wsattr(bool, mandatory=False, default=False) """Autoexecute action plan once audit is succeeded""" next_run_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The next time audit launch""" hostname = wtypes.wsattr(wtypes.text, mandatory=False) """Hostname the audit is running on""" start_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The start time for continuous audit launch""" end_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The end time that stopping continuous audit""" force = wsme.wsattr(bool, mandatory=False, default=False) """Allow Action Plan of this Audit be executed in parallel with other Action Plan""" def 
__init__(self, **kwargs): self.fields = [] fields = list(objects.Audit.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) self.fields.append('goal_id') self.fields.append('strategy_id') fields.append('goal_uuid') setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) fields.append('goal_name') setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(audit, url, expand=True): if not expand: audit.unset_fields_except(['uuid', 'name', 'audit_type', 'state', 'goal_uuid', 'interval', 'scope', 'strategy_uuid', 'goal_name', 'strategy_name', 'auto_trigger', 'next_run_time']) audit.links = [link.Link.make_link('self', url, 'audits', audit.uuid), link.Link.make_link('bookmark', url, 'audits', audit.uuid, bookmark=True) ] return audit @classmethod def convert_with_links(cls, rpc_audit, expand=True): audit = Audit(**rpc_audit.as_dict()) hide_fields_in_newer_versions(audit) return cls._convert_with_links(audit, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='My Audit', audit_type='ONESHOT', state='PENDING', created_at=timeutils.utcnow(), deleted_at=None, updated_at=timeutils.utcnow(), interval='7200', scope=[], auto_trigger=False, next_run_time=timeutils.utcnow(), start_time=timeutils.utcnow(), end_time=timeutils.utcnow()) sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff' return cls._convert_with_links(sample, 'http://localhost:9322', expand) class AuditCollection(collection.Collection): """API representation of a collection of audits.""" audits = [Audit] """A list containing audits objects""" def __init__(self, **kwargs): super(AuditCollection, self).__init__() self._type = 'audits' @staticmethod def convert_with_links(rpc_audits, limit, url=None, expand=False, **kwargs): collection = AuditCollection() collection.audits = [Audit.convert_with_links(p, expand) for p in rpc_audits] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.audits = [Audit.sample(expand=False)] return sample class AuditsController(rest.RestController): """REST controller for Audits.""" def __init__(self): super(AuditsController, self).__init__() self.dc_client = rpcapi.DecisionEngineAPI() from_audits = False """A flag to indicate if the requests to this controller are coming from the top-level resource Audits.""" _custom_actions = { 'detail': ['GET'], } def _get_audits_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, goal=None, strategy=None): additional_fields = ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"] api_utils.validate_sort_key( sort_key, list(objects.Audit.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Audit.get_by_uuid(pecan.request.context, marker) filters = {} if goal: if utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: # TODO(michaelgugino): add method to get goal by name. 
filters['goal_name'] = goal if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: # TODO(michaelgugino): add method to get goal by name. filters['strategy_name'] = strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) audits = objects.Audit.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) audits_collection = AuditCollection.convert_with_links( audits, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(audits_collection.audits, sort_key, sort_dir) return audits_collection @wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text, wtypes.text, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', goal=None, strategy=None): """Retrieve a list of audits. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'audit:get_all', action='audit:get_all') return self._get_audits_collection(marker, limit, sort_key, sort_dir, goal=goal, strategy=strategy) @wsme_pecan.wsexpose(AuditCollection, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def detail(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audits with detail. :param goal: goal UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'audit:detail', action='audit:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "audits": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['audits', 'detail']) return self._get_audits_collection(marker, limit, sort_key, sort_dir, expand, resource_url, goal=goal) @wsme_pecan.wsexpose(Audit, wtypes.text) def get_one(self, audit): """Retrieve information about the given audit. :param audit: UUID or name of an audit. """ if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit = api_utils.get_resource('Audit', audit) policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') return Audit.convert_with_links(rpc_audit) @wsme_pecan.wsexpose(Audit, body=AuditPostType, status_code=HTTPStatus.CREATED) def post(self, audit_p): """Create a new audit. :param audit_p: an audit within the request body. 
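
        A minimal request body sketch (illustrative values; ``audit_type``
        is mandatory, and either ``goal`` or ``audit_template_uuid`` must
        identify the goal, as validated in ``AuditPostType.as_audit``)::

            {
                "audit_type": "ONESHOT",
                "goal": "server_consolidation",
                "strategy": "basic",
                "auto_trigger": false
            }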
""" context = pecan.request.context policy.enforce(context, 'audit:create', action='audit:create') audit = audit_p.as_audit(context) if self.from_audits: raise exception.OperationNotPermitted if not audit._goal_uuid: raise exception.Invalid( message=_('A valid goal_id or audit_template_id ' 'must be provided')) strategy_uuid = audit.strategy_uuid no_schema = True if strategy_uuid is not None: # validate parameter when predefined strategy in audit template strategy = objects.Strategy.get(pecan.request.context, strategy_uuid) schema = strategy.parameters_spec if schema: # validate input parameter with default value feedback no_schema = False utils.StrictDefaultValidatingDraft4Validator(schema).validate( audit.parameters) if no_schema and audit.parameters: raise exception.Invalid(_('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy')) audit_dict = audit.as_dict() # convert local time to UTC time start_time_value = audit_dict.get('start_time') end_time_value = audit_dict.get('end_time') if start_time_value: audit_dict['start_time'] = start_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) if end_time_value: audit_dict['end_time'] = end_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) new_audit = objects.Audit(context, **audit_dict) new_audit.create() # Set the HTTP Location Header pecan.response.location = link.build_url('audits', new_audit.uuid) # trigger decision-engine to run the audit if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value: self.dc_client.trigger_audit(context, new_audit.uuid) return Audit.convert_with_links(new_audit) @wsme.validate(types.uuid, [AuditPatchType]) @wsme_pecan.wsexpose(Audit, wtypes.text, body=[AuditPatchType]) def patch(self, audit, patch): """Update an existing audit. :param audit: UUID or name of an audit. :param patch: a json PATCH document to apply to this audit. 
""" if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context audit_to_update = api_utils.get_resource( 'Audit', audit, eager=True) policy.enforce(context, 'audit:update', audit_to_update, action='audit:update') try: audit_dict = audit_to_update.as_dict() initial_state = audit_dict['state'] new_state = api_utils.get_patch_value(patch, 'state') if not api_utils.check_audit_state_transition( patch, initial_state): error_message = _("State transition not allowed: " "(%(initial_state)s -> %(new_state)s)") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=initial_state, new_state=new_state)) patch_path = api_utils.get_patch_key(patch, 'path') if patch_path in ('start_time', 'end_time'): patch_value = api_utils.get_patch_value(patch, patch_path) # convert string format to UTC time new_patch_value = wutils.parse_isodatetime( patch_value).replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) api_utils.set_patch_value(patch, patch_path, new_patch_value) audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Audit.fields: try: patch_val = getattr(audit, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if audit_to_update[field] != patch_val: audit_to_update[field] = patch_val audit_to_update.save() return Audit.convert_with_links(audit_to_update) @wsme_pecan.wsexpose(None, wtypes.text, status_code=HTTPStatus.NO_CONTENT) def delete(self, audit): """Delete an audit. :param audit: UUID or name of an audit. """ context = pecan.request.context audit_to_delete = api_utils.get_resource( 'Audit', audit, eager=True) policy.enforce(context, 'audit:delete', audit_to_delete, action='audit:delete') initial_state = audit_to_delete.state new_state = objects.audit.State.DELETED if not objects.audit.AuditStateTransitionManager( ).check_transition(initial_state, new_state): raise exception.DeleteError( state=initial_state) audit_to_delete.soft_delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/audit_template.py0000664000175000017500000006654700000000000025354 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Audit ` may be launched several times with the same settings (:ref:`Goal `, thresholds, ...). Therefore it makes sense to save those settings in some sort of Audit preset object, which is known as an :ref:`Audit Template `. An :ref:`Audit Template ` contains at least the :ref:`Goal ` of the :ref:`Audit `. 
It may also contain some error handling settings indicating whether: - :ref:`Watcher Applier ` stops the entire operation - :ref:`Watcher Applier ` performs a rollback and how many retries should be attempted before failure occurs (also the latter can be complex: for example the scenario in which there are many first-time failures on ultimately successful :ref:`Actions `). Moreover, an :ref:`Audit Template ` may contain some settings related to the level of automation for the :ref:`Action Plan ` that will be generated by the :ref:`Audit `. A flag will indicate whether the :ref:`Action Plan ` will be launched automatically or will need a manual confirmation from the :ref:`Administrator `. """ from http import HTTPStatus from oslo_utils import timeutils import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import context as context_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils as common_utils from watcher.decision_engine.loading import default as default_loading from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class AuditTemplatePostType(wtypes.Base): _ctx = context_utils.make_context() name = wtypes.wsattr(wtypes.text, mandatory=True) """Name of this audit template""" description = wtypes.wsattr(wtypes.text, mandatory=False) """Short description of this audit template""" goal = wtypes.wsattr(wtypes.text, mandatory=True) """Goal UUID or name of the audit template""" strategy = wtypes.wsattr(wtypes.text, mandatory=False) """Strategy UUID or name of the audit template""" scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[]) """Audit Scope""" def as_audit_template(self): return AuditTemplate( name=self.name, description=self.description, goal_id=self.goal, # Dirty trick ... goal=self.goal, strategy_id=self.strategy, # Dirty trick ... 
            strategy_uuid=self.strategy,
            scope=self.scope,
        )

    @staticmethod
    def _build_schema():
        SCHEMA = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "type": "array",
            "items": {
                "type": "object",
                "properties": AuditTemplatePostType._get_schemas(),
                "additionalProperties": False
            }
        }
        return SCHEMA

    @staticmethod
    def _get_schemas():
        collectors = default_loading.ClusterDataModelCollectorLoader(
        ).list_available()
        schemas = {k: c.SCHEMA for k, c in collectors.items()
                   if hasattr(c, "SCHEMA")}
        return schemas

    @staticmethod
    def validate(audit_template):
        available_goals = objects.Goal.list(AuditTemplatePostType._ctx)
        available_goal_uuids_map = {g.uuid: g for g in available_goals}
        available_goal_names_map = {g.name: g for g in available_goals}
        if audit_template.goal in available_goal_uuids_map:
            goal = available_goal_uuids_map[audit_template.goal]
        elif audit_template.goal in available_goal_names_map:
            goal = available_goal_names_map[audit_template.goal]
        else:
            raise exception.InvalidGoal(goal=audit_template.goal)

        if audit_template.scope:
            keys = [list(s)[0] for s in audit_template.scope]
            if keys[0] not in ('compute', 'storage'):
                audit_template.scope = [dict(compute=audit_template.scope)]
            common_utils.Draft4Validator(
                AuditTemplatePostType._build_schema()
            ).validate(audit_template.scope)

            include_host_aggregates = False
            exclude_host_aggregates = False
            for rule in audit_template.scope[0]['compute']:
                if 'host_aggregates' in rule:
                    include_host_aggregates = True
                elif 'exclude' in rule:
                    for resource in rule['exclude']:
                        if 'host_aggregates' in resource:
                            exclude_host_aggregates = True
            if include_host_aggregates and exclude_host_aggregates:
                raise exception.Invalid(
                    message=_(
                        "host_aggregates can't be "
                        "included and excluded together"))

        if audit_template.strategy:
            try:
                if (common_utils.is_uuid_like(audit_template.strategy) or
                        common_utils.is_int_like(audit_template.strategy)):
                    strategy = objects.Strategy.get(
                        AuditTemplatePostType._ctx, audit_template.strategy)
                else:
                    strategy = objects.Strategy.get_by_name(
                        AuditTemplatePostType._ctx, audit_template.strategy)
            except Exception:
                raise exception.InvalidStrategy(
                    strategy=audit_template.strategy)

            # Check that the strategy we indicate is actually related to the
            # specified goal
            if strategy.goal_id != goal.id:
                available_strategies = objects.Strategy.list(
                    AuditTemplatePostType._ctx)
                choices = ["'%s' (%s)" % (s.uuid, s.name)
                           for s in available_strategies]
                raise exception.InvalidStrategy(
                    message=_(
                        "'%(strategy)s' strategy does not relate to the "
                        "'%(goal)s' goal. "
                        "Possible choices: %(choices)s")
Possible choices: %(choices)s") % dict(strategy=strategy.name, goal=goal.name, choices=", ".join(choices))) audit_template.strategy = strategy.uuid # We force the UUID so that we do not need to query the DB with the # name afterwards audit_template.goal = goal.uuid return audit_template class AuditTemplatePatchType(types.JsonPatchType): _ctx = context_utils.make_context() @staticmethod def mandatory_attrs(): return [] @staticmethod def validate(patch): if patch.path == "/goal" and patch.op != "remove": AuditTemplatePatchType._validate_goal(patch) elif patch.path == "/goal" and patch.op == "remove": raise exception.OperationNotPermitted( _("Cannot remove 'goal' attribute " "from an audit template")) if patch.path == "/strategy": AuditTemplatePatchType._validate_strategy(patch) return types.JsonPatchType.validate(patch) @staticmethod def _validate_goal(patch): patch.path = "/goal_id" goal = patch.value if goal: available_goals = objects.Goal.list( AuditTemplatePatchType._ctx) available_goal_uuids_map = {g.uuid: g for g in available_goals} available_goal_names_map = {g.name: g for g in available_goals} if goal in available_goal_uuids_map: patch.value = available_goal_uuids_map[goal].id elif goal in available_goal_names_map: patch.value = available_goal_names_map[goal].id else: raise exception.InvalidGoal(goal=goal) @staticmethod def _validate_strategy(patch): patch.path = "/strategy_id" strategy = patch.value if strategy: available_strategies = objects.Strategy.list( AuditTemplatePatchType._ctx) available_strategy_uuids_map = { s.uuid: s for s in available_strategies} available_strategy_names_map = { s.name: s for s in available_strategies} if strategy in available_strategy_uuids_map: patch.value = available_strategy_uuids_map[strategy].id elif strategy in available_strategy_names_map: patch.value = available_strategy_names_map[strategy].id else: raise exception.InvalidStrategy(strategy=strategy) class AuditTemplate(base.APIBase): """API representation of a audit template. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an audit template. 
""" _goal_uuid = None _goal_name = None _strategy_uuid = None _strategy_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if (common_utils.is_uuid_like(value) or common_utils.is_int_like(value)): goal = objects.Goal.get( pecan.request.context, value) else: goal = objects.Goal.get_by_name( pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if (common_utils.is_uuid_like(value) or common_utils.is_int_like(value)): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for this audit template""" name = wtypes.text """Name of this audit template""" description = wtypes.wsattr(wtypes.text, mandatory=False) """Short description of this audit template""" goal_uuid = wtypes.wsproperty( wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """Goal UUID the audit template refers to""" goal_name = wtypes.wsproperty( wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit template refers to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the audit template refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this audit template refers to""" audits = wtypes.wsattr([link.Link], readonly=True) """Links to the collection of audits contained in this audit template""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit template links""" scope = wtypes.wsattr(types.jsontype, mandatory=False) """Audit Scope""" def __init__(self, **kwargs): super(AuditTemplate, self).__init__() self.fields = [] fields = list(objects.AuditTemplate.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) self.fields.append('goal_id') self.fields.append('strategy_id') setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset)) # goal_uuid & strategy_uuid are not part of # objects.AuditTemplate.fields because they're API-only attributes. 
self.fields.append('goal_uuid') self.fields.append('goal_name') self.fields.append('strategy_uuid') self.fields.append('strategy_name') setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(audit_template, url, expand=True): if not expand: audit_template.unset_fields_except( ['uuid', 'name', 'goal_uuid', 'goal_name', 'scope', 'strategy_uuid', 'strategy_name']) # The numeric ID should not be exposed to # the user, it's internal only. audit_template.goal_id = wtypes.Unset audit_template.strategy_id = wtypes.Unset audit_template.links = [link.Link.make_link('self', url, 'audit_templates', audit_template.uuid), link.Link.make_link('bookmark', url, 'audit_templates', audit_template.uuid, bookmark=True)] return audit_template @classmethod def convert_with_links(cls, rpc_audit_template, expand=True): audit_template = AuditTemplate(**rpc_audit_template.as_dict()) hide_fields_in_newer_versions(audit_template) return cls._convert_with_links(audit_template, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='My Audit Template', description='Description of my audit template', goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6', strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986', created_at=timeutils.utcnow(), deleted_at=None, updated_at=timeutils.utcnow(), scope=[],) return cls._convert_with_links(sample, 'http://localhost:9322', expand) class AuditTemplateCollection(collection.Collection): """API representation of a collection of audit templates.""" audit_templates = [AuditTemplate] """A list containing audit templates objects""" def __init__(self, **kwargs): super(AuditTemplateCollection, self).__init__() self._type = 'audit_templates' @staticmethod def convert_with_links(rpc_audit_templates, limit, url=None, expand=False, **kwargs): at_collection = AuditTemplateCollection() at_collection.audit_templates = [ AuditTemplate.convert_with_links(p, expand) for p in rpc_audit_templates] at_collection.next = at_collection.get_next(limit, url=url, **kwargs) return at_collection @classmethod def sample(cls): sample = cls() sample.audit_templates = [AuditTemplate.sample(expand=False)] return sample class AuditTemplatesController(rest.RestController): """REST controller for AuditTemplates.""" def __init__(self): super(AuditTemplatesController, self).__init__() from_audit_templates = False """A flag to indicate if the requests to this controller are coming from the top-level resource AuditTemplates.""" _custom_actions = { 'detail': ['GET'], } def _get_audit_templates_collection(self, filters, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): additional_fields = ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"] api_utils.validate_sort_key( sort_key, list(objects.AuditTemplate.fields) + additional_fields) api_utils.validate_search_filters( filters, list(objects.AuditTemplate.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.AuditTemplate.get_by_uuid( pecan.request.context, marker) need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) audit_templates = 
objects.AuditTemplate.list( pecan.request.context, filters, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir) audit_templates_collection = \ AuditTemplateCollection.convert_with_links( audit_templates, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort( audit_templates_collection.audit_templates, sort_key, sort_dir) return audit_templates_collection @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, goal=None, strategy=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audit templates. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'audit_template:get_all', action='audit_template:get_all') filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal if strategy: if common_utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy return self._get_audit_templates_collection( filters, marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def detail(self, goal=None, strategy=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audit templates with detail. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'audit_template:detail', action='audit_template:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "audit_templates": raise exception.HTTPNotFound filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal if strategy: if common_utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy expand = True resource_url = '/'.join(['audit_templates', 'detail']) return self._get_audit_templates_collection(filters, marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(AuditTemplate, wtypes.text) def get_one(self, audit_template): """Retrieve information about the given audit template. :param audit_template: UUID or name of an audit template. 
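An illustrative request, using a hypothetical template name::

    GET /v1/audit_templates/my-audit-template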
""" if self.from_audit_templates: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit_template = api_utils.get_resource('AuditTemplate', audit_template) policy.enforce(context, 'audit_template:get', rpc_audit_template, action='audit_template:get') return AuditTemplate.convert_with_links(rpc_audit_template) @wsme.validate(types.uuid, AuditTemplatePostType) @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType, status_code=HTTPStatus.CREATED) def post(self, audit_template_postdata): """Create a new audit template. :param audit_template_postdata: the audit template POST data from the request body. """ if self.from_audit_templates: raise exception.OperationNotPermitted context = pecan.request.context policy.enforce(context, 'audit_template:create', action='audit_template:create') context = pecan.request.context audit_template = audit_template_postdata.as_audit_template() audit_template_dict = audit_template.as_dict() new_audit_template = objects.AuditTemplate(context, **audit_template_dict) new_audit_template.create() # Set the HTTP Location Header pecan.response.location = link.build_url( 'audit_templates', new_audit_template.uuid) return AuditTemplate.convert_with_links(new_audit_template) @wsme.validate(types.uuid, [AuditTemplatePatchType]) @wsme_pecan.wsexpose(AuditTemplate, wtypes.text, body=[AuditTemplatePatchType]) def patch(self, audit_template, patch): """Update an existing audit template. :param template_uuid: UUID of a audit template. :param patch: a json PATCH document to apply to this audit template. """ if self.from_audit_templates: raise exception.OperationNotPermitted context = pecan.request.context audit_template_to_update = api_utils.get_resource('AuditTemplate', audit_template) policy.enforce(context, 'audit_template:update', audit_template_to_update, action='audit_template:update') if common_utils.is_uuid_like(audit_template): audit_template_to_update = objects.AuditTemplate.get_by_uuid( pecan.request.context, audit_template) else: audit_template_to_update = objects.AuditTemplate.get_by_name( pecan.request.context, audit_template) try: audit_template_dict = audit_template_to_update.as_dict() audit_template = AuditTemplate(**api_utils.apply_jsonpatch( audit_template_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.AuditTemplate.fields: try: patch_val = getattr(audit_template, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if audit_template_to_update[field] != patch_val: audit_template_to_update[field] = patch_val audit_template_to_update.save() return AuditTemplate.convert_with_links(audit_template_to_update) @wsme_pecan.wsexpose(None, wtypes.text, status_code=HTTPStatus.NO_CONTENT) def delete(self, audit_template): """Delete a audit template. :param template_uuid: UUID or name of an audit template. 
""" context = pecan.request.context audit_template_to_delete = api_utils.get_resource('AuditTemplate', audit_template) policy.enforce(context, 'audit_template:delete', audit_template_to_delete, action='audit_template:delete') audit_template_to_delete.soft_delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/collection.py0000664000175000017500000000333400000000000024467 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import pecan from wsme import types as wtypes from watcher.api.controllers import base from watcher.api.controllers import link class Collection(base.APIBase): next = wtypes.text """A link to retrieve the next subset of the collection""" @property def collection(self): return getattr(self, self._type) def has_next(self, limit): """Return whether collection has more items.""" return len(self.collection) and len(self.collection) == limit def get_next(self, limit, url=None, marker_field="uuid", **kwargs): """Return a link to the next subset of the collection.""" if not self.has_next(limit): return wtypes.Unset resource_url = url or self._type q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { 'args': q_args, 'limit': limit, 'marker': getattr(self.collection[-1], marker_field)} return link.Link.make_link('next', pecan.request.host_url, resource_url, next_args).href ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/data_model.py0000664000175000017500000000514600000000000024430 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An Interface for users and admin to List Data Model. 
""" import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils from watcher.common import exception from watcher.common import policy from watcher.decision_engine import rpcapi class DataModelController(rest.RestController): """REST controller for data model""" def __init__(self): super(DataModelController, self).__init__() from_data_model = False """A flag to indicate if the requests to this controller are coming from the top-level resource DataModel.""" @wsme_pecan.wsexpose(wtypes.text, wtypes.text, types.uuid) def get_all(self, data_model_type='compute', audit_uuid=None): """Retrieve information about the given data model. :param data_model_type: The type of data model user wants to list. Supported values: compute. Future support values: storage, baremetal. The default value is compute. :param audit_uuid: The UUID of the audit, used to filter data model by the scope in audit. """ if not utils.allow_list_datamodel(): raise exception.NotAcceptable if self.from_data_model: raise exception.OperationNotPermitted allowed_data_model_type = [ 'compute', ] if data_model_type not in allowed_data_model_type: raise exception.DataModelTypeNotFound( data_model_type=data_model_type) context = pecan.request.context de_client = rpcapi.DecisionEngineAPI() policy.enforce(context, 'data_model:get_all', action='data_model:get_all') rpc_all_data_model = de_client.get_data_model_info( context, data_model_type, audit_uuid) return rpc_all_data_model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/efficacy_indicator.py0000664000175000017500000000523700000000000026145 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An efficacy indicator is a single value that gives an indication on how the :ref:`solution ` produced by a given :ref:`strategy ` performed. These efficacy indicators are specific to a given :ref:`goal ` and are usually used to compute the :ref:`global efficacy ` of the resulting :ref:`action plan `. In Watcher, these efficacy indicators are specified alongside the goal they relate to. When a strategy (which always relates to a goal) is executed, it produces a solution containing the efficacy indicators specified by the goal. This solution, which has been translated by the :ref:`Watcher Planner ` into an action plan, will see its indicators and global efficacy stored and would now be accessible through the :ref:`Watcher API `. """ import numbers from wsme import types as wtypes from watcher.api.controllers import base from watcher import objects class EfficacyIndicator(base.APIBase): """API representation of a efficacy indicator. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an efficacy indicator. 
""" name = wtypes.wsattr(wtypes.text, mandatory=True) """Name of this efficacy indicator""" description = wtypes.wsattr(wtypes.text, mandatory=False) """Description of this efficacy indicator""" unit = wtypes.wsattr(wtypes.text, mandatory=False) """Unit of this efficacy indicator""" value = wtypes.wsattr(numbers.Number, mandatory=True) """Value of this efficacy indicator""" def __init__(self, **kwargs): super(EfficacyIndicator, self).__init__() self.fields = [] fields = list(objects.EfficacyIndicator.fields) for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/goal.py0000664000175000017500000002133500000000000023257 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Goal ` is a human readable, observable and measurable end result having one objective to be achieved. Here are some examples of :ref:`Goals `: - minimize the energy consumption - minimize the number of compute nodes (consolidation) - balance the workload among compute nodes - minimize the license cost (some software have a licensing model which is based on the number of sockets or cores where the software is deployed) - find the most appropriate moment for a planned maintenance on a given group of host (which may be an entire availability zone): power supply replacement, cooling system replacement, hardware modification, ... """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class Goal(base.APIBase): """API representation of a goal. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a goal. 
""" uuid = types.uuid """Unique UUID for this goal""" name = wtypes.text """Name of the goal""" display_name = wtypes.text """Localized name of the goal""" efficacy_specification = wtypes.wsattr(types.jsontype, readonly=True) """Efficacy specification for this goal""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit template links""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.Goal.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) @staticmethod def _convert_with_links(goal, url, expand=True): if not expand: goal.unset_fields_except(['uuid', 'name', 'display_name', 'efficacy_specification']) goal.links = [link.Link.make_link('self', url, 'goals', goal.uuid), link.Link.make_link('bookmark', url, 'goals', goal.uuid, bookmark=True)] return goal @classmethod def convert_with_links(cls, goal, expand=True): goal = Goal(**goal.as_dict()) hide_fields_in_newer_versions(goal) return cls._convert_with_links(goal, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls( uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='DUMMY', display_name='Dummy strategy', efficacy_specification=[ {'description': 'Dummy indicator', 'name': 'dummy', 'schema': 'Range(min=0, max=100, min_included=True, ' 'max_included=True, msg=None)', 'unit': '%'} ]) return cls._convert_with_links(sample, 'http://localhost:9322', expand) class GoalCollection(collection.Collection): """API representation of a collection of goals.""" goals = [Goal] """A list containing goals objects""" def __init__(self, **kwargs): super(GoalCollection, self).__init__() self._type = 'goals' @staticmethod def convert_with_links(goals, limit, url=None, expand=False, **kwargs): goal_collection = GoalCollection() goal_collection.goals = [ Goal.convert_with_links(g, expand) for g in goals] goal_collection.next = goal_collection.get_next( limit, url=url, **kwargs) return goal_collection @classmethod def sample(cls): sample = cls() sample.goals = [Goal.sample(expand=False)] return sample class GoalsController(rest.RestController): """REST controller for Goals.""" def __init__(self): super(GoalsController, self).__init__() from_goals = False """A flag to indicate if the requests to this controller are coming from the top-level resource Goals.""" _custom_actions = { 'detail': ['GET'], } def _get_goals_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): api_utils.validate_sort_key( sort_key, list(objects.Goal.fields)) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Goal.get_by_uuid( pecan.request.context, marker) sort_db_key = (sort_key if sort_key in objects.Goal.fields else None) goals = objects.Goal.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir) return GoalCollection.convert_with_links(goals, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @wsme_pecan.wsexpose(GoalCollection, wtypes.text, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of goals. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". 
Default: asc. """ context = pecan.request.context policy.enforce(context, 'goal:get_all', action='goal:get_all') return self._get_goals_collection(marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(GoalCollection, wtypes.text, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of goals with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'goal:detail', action='goal:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "goals": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['goals', 'detail']) return self._get_goals_collection(marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(Goal, wtypes.text) def get_one(self, goal): """Retrieve information about the given goal. :param goal: UUID or name of the goal. """ if self.from_goals: raise exception.OperationNotPermitted context = pecan.request.context rpc_goal = api_utils.get_resource('Goal', goal) policy.enforce(context, 'goal:get', rpc_goal, action='goal:get') return Goal.convert_with_links(rpc_goal) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/scoring_engine.py0000664000175000017500000002205700000000000025330 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2016 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Scoring Engine ` is an executable that has a well-defined input, a well-defined output, and performs a purely mathematical task. That is, the calculation does not depend on the environment in which it is running - it would produce the same result anywhere. Because there might be multiple algorithms used to build a particular data model (and therefore a scoring engine), the usage of scoring engine might vary. A metainfo field is supposed to contain any information which might be needed by the user of a given scoring engine. """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. 
These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class ScoringEngine(base.APIBase): """API representation of a scoring engine. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a scoring engine. """ uuid = types.uuid """Unique UUID of the scoring engine""" name = wtypes.text """The name of the scoring engine""" description = wtypes.text """A human readable description of the Scoring Engine""" metainfo = wtypes.text """A metadata associated with the scoring engine""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" def __init__(self, **kwargs): super(ScoringEngine, self).__init__() self.fields = [] self.fields.append('uuid') self.fields.append('name') self.fields.append('description') self.fields.append('metainfo') setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) setattr(self, 'name', kwargs.get('name', wtypes.Unset)) setattr(self, 'description', kwargs.get('description', wtypes.Unset)) setattr(self, 'metainfo', kwargs.get('metainfo', wtypes.Unset)) @staticmethod def _convert_with_links(se, url, expand=True): if not expand: se.unset_fields_except( ['uuid', 'name', 'description']) se.links = [link.Link.make_link('self', url, 'scoring_engines', se.uuid), link.Link.make_link('bookmark', url, 'scoring_engines', se.uuid, bookmark=True)] return se @classmethod def convert_with_links(cls, scoring_engine, expand=True): scoring_engine = ScoringEngine(**scoring_engine.as_dict()) hide_fields_in_newer_versions(scoring_engine) return cls._convert_with_links( scoring_engine, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='81bbd3c7-3b08-4d12-a268-99354dbf7b71', name='sample-se-123', description='Sample Scoring Engine 123 just for testing') return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ScoringEngineCollection(collection.Collection): """API representation of a collection of scoring engines.""" scoring_engines = [ScoringEngine] """A list containing scoring engine objects""" def __init__(self, **kwargs): super(ScoringEngineCollection, self).__init__() self._type = 'scoring_engines' @staticmethod def convert_with_links(scoring_engines, limit, url=None, expand=False, **kwargs): collection = ScoringEngineCollection() collection.scoring_engines = [ScoringEngine.convert_with_links( se, expand) for se in scoring_engines] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.scoring_engines = [ScoringEngine.sample(expand=False)] return sample class ScoringEngineController(rest.RestController): """REST controller for Scoring Engines.""" def __init__(self): super(ScoringEngineController, self).__init__() from_scoring_engines = False """A flag to indicate if the requests to this controller are coming from the top-level resource Scoring Engines.""" _custom_actions = { 'detail': ['GET'], } def _get_scoring_engines_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): api_utils.validate_sort_key( sort_key, list(objects.ScoringEngine.fields)) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ScoringEngine.get_by_uuid( pecan.request.context, marker) filters = {} sort_db_key = (sort_key if sort_key in 
objects.ScoringEngine.fields else None) scoring_engines = objects.ScoringEngine.list( context=pecan.request.context, limit=limit, marker=marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) return ScoringEngineCollection.convert_with_links( scoring_engines, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of Scoring Engines. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: name. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'scoring_engine:get_all', action='scoring_engine:get_all') return self._get_scoring_engines_collection( marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of Scoring Engines with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: name. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'scoring_engine:detail', action='scoring_engine:detail') parent = pecan.request.path.split('/')[:-1][-1] if parent != "scoring_engines": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['scoring_engines', 'detail']) return self._get_scoring_engines_collection( marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(ScoringEngine, wtypes.text) def get_one(self, scoring_engine): """Retrieve information about the given Scoring Engine. :param scoring_engine_name: The name of the Scoring Engine. """ context = pecan.request.context policy.enforce(context, 'scoring_engine:get', action='scoring_engine:get') if self.from_scoring_engines: raise exception.OperationNotPermitted rpc_scoring_engine = api_utils.get_resource( 'ScoringEngine', scoring_engine) return ScoringEngine.convert_with_links(rpc_scoring_engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/service.py0000664000175000017500000002265700000000000024005 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Service mechanism provides ability to monitor Watcher services state. 
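A service is reported as ACTIVE as long as its last heartbeat is no older
than CONF.service_down_time seconds, and as FAILED otherwise. An
illustrative request::

    GET /v1/services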
""" import datetime from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import utils as api_utils from watcher.common import context from watcher.common import exception from watcher.common import policy from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class Service(base.APIBase): """API representation of a service. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a service. """ _status = None _context = context.RequestContext(is_admin=True) def _get_status(self): return self._status def _set_status(self, id): service = objects.Service.get(pecan.request.context, id) last_heartbeat = (service.last_seen_up or service.updated_at or service.created_at) if isinstance(last_heartbeat, str): # NOTE(russellb) If this service came in over rpc via # conductor, then the timestamp will be a string and needs to be # converted back to a datetime. last_heartbeat = timeutils.parse_strtime(last_heartbeat) else: # Objects have proper UTC timezones, but the timeutils comparison # below does not (and will fail) last_heartbeat = last_heartbeat.replace(tzinfo=None) elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) is_up = abs(elapsed) <= CONF.service_down_time if not is_up: LOG.warning('Seems service %(name)s on host %(host)s is down. ' 'Last heartbeat was %(lhb)s.' 
'Elapsed time is %(el)s', {'name': service.name, 'host': service.host, 'lhb': str(last_heartbeat), 'el': str(elapsed)}) self._status = objects.service.ServiceStatus.FAILED else: self._status = objects.service.ServiceStatus.ACTIVE id = wtypes.wsattr(int, readonly=True) """ID for this service.""" name = wtypes.text """Name of the service.""" host = wtypes.text """Host where service is placed on.""" last_seen_up = wtypes.wsattr(datetime.datetime, readonly=True) """Time when Watcher service sent latest heartbeat.""" status = wtypes.wsproperty(wtypes.text, _get_status, _set_status, mandatory=True) links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link.""" def __init__(self, **kwargs): super(Service, self).__init__() fields = list(objects.Service.fields) + ['status'] self.fields = [] for field in fields: self.fields.append(field) setattr(self, field, kwargs.get( field if field != 'status' else 'id', wtypes.Unset)) @staticmethod def _convert_with_links(service, url, expand=True): if not expand: service.unset_fields_except( ['id', 'name', 'host', 'status']) service.links = [ link.Link.make_link('self', url, 'services', str(service.id)), link.Link.make_link('bookmark', url, 'services', str(service.id), bookmark=True)] return service @classmethod def convert_with_links(cls, service, expand=True): service = Service(**service.as_dict()) hide_fields_in_newer_versions(service) return cls._convert_with_links( service, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(id=1, name='watcher-applier', host='Controller', last_seen_up=datetime.datetime(2016, 1, 1)) return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ServiceCollection(collection.Collection): """API representation of a collection of services.""" services = [Service] """A list containing services objects""" def __init__(self, **kwargs): super(ServiceCollection, self).__init__() self._type = 'services' @staticmethod def convert_with_links(services, limit, url=None, expand=False, **kwargs): service_collection = ServiceCollection() service_collection.services = [ Service.convert_with_links(g, expand) for g in services] service_collection.next = service_collection.get_next( limit, url=url, marker_field='id', **kwargs) return service_collection @classmethod def sample(cls): sample = cls() sample.services = [Service.sample(expand=False)] return sample class ServicesController(rest.RestController): """REST controller for Services.""" def __init__(self): super(ServicesController, self).__init__() from_services = False """A flag to indicate if the requests to this controller are coming from the top-level resource Services.""" _custom_actions = { 'detail': ['GET'], } def _get_services_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): api_utils.validate_sort_key( sort_key, list(objects.Service.fields)) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Service.get( pecan.request.context, marker) sort_db_key = (sort_key if sort_key in objects.Service.fields else None) services = objects.Service.list( pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir) return ServiceCollection.convert_with_links( services, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', 
sort_dir='asc'): """Retrieve a list of services. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'service:get_all', action='service:get_all') return self._get_services_collection(marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of services with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'service:detail', action='service:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "services": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['services', 'detail']) return self._get_services_collection( marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(Service, wtypes.text) def get_one(self, service): """Retrieve information about the given service. :param service: ID or name of the service. """ if self.from_services: raise exception.OperationNotPermitted context = pecan.request.context rpc_service = api_utils.get_resource('Service', service) policy.enforce(context, 'service:get', rpc_service, action='service:get') return Service.convert_with_links(rpc_service) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/strategy.py0000664000175000017500000003046500000000000024203 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Strategy ` is an algorithm implementation which is able to find a :ref:`Solution ` for a given :ref:`Goal `. There may be several potential strategies which are able to achieve the same :ref:`Goal `. This is why it is possible to configure which specific :ref:`Strategy ` should be used for each goal. Some strategies may provide better optimization results but may take more time to find an optimal :ref:`Solution `. 
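An illustrative request, listing the strategies able to achieve a
hypothetical ``dummy`` goal::

    GET /v1/strategies/?goal=dummy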
""" import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils as common_utils from watcher.decision_engine import rpcapi from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class Strategy(base.APIBase): """API representation of a strategy. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a strategy. """ _goal_uuid = None _goal_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if (common_utils.is_uuid_like(value) or common_utils.is_int_like(value)): goal = objects.Goal.get(pecan.request.context, value) else: goal = objects.Goal.get_by_name(pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name uuid = types.uuid """Unique UUID for this strategy""" name = wtypes.text """Name of the strategy""" display_name = wtypes.text """Localized name of the strategy""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated goal links""" goal_uuid = wtypes.wsproperty(wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """The UUID of the goal this audit refers to""" goal_name = wtypes.wsproperty(wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit refers to""" parameters_spec = {wtypes.text: types.jsontype} """Parameters spec dict""" def __init__(self, **kwargs): super(Strategy, self).__init__() self.fields = [] self.fields.append('uuid') self.fields.append('name') self.fields.append('display_name') self.fields.append('goal_uuid') self.fields.append('goal_name') self.fields.append('parameters_spec') setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) setattr(self, 'name', kwargs.get('name', wtypes.Unset)) setattr(self, 'display_name', kwargs.get('display_name', wtypes.Unset)) setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'parameters_spec', kwargs.get('parameters_spec', wtypes.Unset)) @staticmethod def _convert_with_links(strategy, url, expand=True): if not expand: strategy.unset_fields_except( ['uuid', 'name', 'display_name', 'goal_uuid', 'goal_name']) strategy.links = [ link.Link.make_link('self', url, 'strategies', strategy.uuid), link.Link.make_link('bookmark', url, 'strategies', strategy.uuid, bookmark=True)] return strategy @classmethod def 
convert_with_links(cls, strategy, expand=True): strategy = Strategy(**strategy.as_dict()) hide_fields_in_newer_versions(strategy) return cls._convert_with_links( strategy, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='DUMMY', display_name='Dummy strategy') return cls._convert_with_links(sample, 'http://localhost:9322', expand) class StrategyCollection(collection.Collection): """API representation of a collection of strategies.""" strategies = [Strategy] """A list containing strategies objects""" def __init__(self, **kwargs): super(StrategyCollection, self).__init__() self._type = 'strategies' @staticmethod def convert_with_links(strategies, limit, url=None, expand=False, **kwargs): strategy_collection = StrategyCollection() strategy_collection.strategies = [ Strategy.convert_with_links(g, expand) for g in strategies] strategy_collection.next = strategy_collection.get_next( limit, url=url, **kwargs) return strategy_collection @classmethod def sample(cls): sample = cls() sample.strategies = [Strategy.sample(expand=False)] return sample class StrategiesController(rest.RestController): """REST controller for Strategies.""" def __init__(self): super(StrategiesController, self).__init__() from_strategies = False """A flag to indicate if the requests to this controller are coming from the top-level resource Strategies.""" _custom_actions = { 'detail': ['GET'], 'state': ['GET'], } def _get_strategies_collection(self, filters, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): additional_fields = ["goal_uuid", "goal_name"] api_utils.validate_sort_key( sort_key, list(objects.Strategy.fields) + additional_fields) api_utils.validate_search_filters( filters, list(objects.Strategy.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Strategy.get_by_uuid( pecan.request.context, marker) need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) strategies = objects.Strategy.list( pecan.request.context, limit, marker_obj, filters=filters, sort_key=sort_db_key, sort_dir=sort_dir) strategies_collection = StrategyCollection.convert_with_links( strategies, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(strategies_collection.strategies, sort_key, sort_dir) return strategies_collection @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int, wtypes.text, wtypes.text) def get_all(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of strategies. :param goal: goal UUID or name to filter by. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
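Note that sorting on the API-only goal_uuid/goal_name columns cannot be
pushed down to the database, so it is applied in the API layer once the
results have been fetched. An illustrative request::

    GET /v1/strategies/?sort_key=goal_name&sort_dir=desc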
""" context = pecan.request.context policy.enforce(context, 'strategy:get_all', action='strategy:get_all') filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal return self._get_strategies_collection( filters, marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int, wtypes.text, wtypes.text) def detail(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of strategies with detail. :param goal: goal UUID or name to filter by. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'strategy:detail', action='strategy:detail') # NOTE(lucasagomes): /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "strategies": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['strategies', 'detail']) filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal return self._get_strategies_collection( filters, marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(wtypes.text, wtypes.text) def state(self, strategy): """Retrieve an information about strategy requirements. :param strategy: name of the strategy. """ context = pecan.request.context policy.enforce(context, 'strategy:state', action='strategy:state') parents = pecan.request.path.split('/')[:-1] if parents[-2] != "strategies": raise exception.HTTPNotFound rpc_strategy = api_utils.get_resource('Strategy', strategy) de_client = rpcapi.DecisionEngineAPI() strategy_state = de_client.get_strategy_info(context, rpc_strategy.name) strategy_state.extend([{ 'type': 'Name', 'state': rpc_strategy.name, 'mandatory': '', 'comment': ''}]) return strategy_state @wsme_pecan.wsexpose(Strategy, wtypes.text) def get_one(self, strategy): """Retrieve information about the given strategy. :param strategy: UUID or name of the strategy. """ if self.from_strategies: raise exception.OperationNotPermitted context = pecan.request.context rpc_strategy = api_utils.get_resource('Strategy', strategy) policy.enforce(context, 'strategy:get', rpc_strategy, action='strategy:get') return Strategy.convert_with_links(rpc_strategy) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/types.py0000664000175000017500000001457700000000000023513 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from oslo_utils import strutils import wsme from wsme import types as wtypes from watcher._i18n import _ from watcher.common import exception from watcher.common import utils class UuidOrNameType(wtypes.UserType): """A simple UUID or logical name type.""" basetype = wtypes.text name = 'uuid_or_name' @staticmethod def validate(value): if not (utils.is_uuid_like(value) or utils.is_hostname_safe(value)): raise exception.InvalidUuidOrName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidOrNameType.validate(value) class IntervalOrCron(wtypes.UserType): """A simple int value or cron syntax type""" basetype = wtypes.text name = 'interval_or_cron' @staticmethod def validate(value): if not (utils.is_int_like(value) or utils.is_cron_like(value)): raise exception.InvalidIntervalOrCron(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return IntervalOrCron.validate(value) interval_or_cron = IntervalOrCron() class NameType(wtypes.UserType): """A simple logical name type.""" basetype = wtypes.text name = 'name' @staticmethod def validate(value): if not utils.is_hostname_safe(value): raise exception.InvalidName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return NameType.validate(value) class UuidType(wtypes.UserType): """A simple UUID type.""" basetype = wtypes.text name = 'uuid' @staticmethod def validate(value): if not utils.is_uuid_like(value): raise exception.InvalidUUID(uuid=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidType.validate(value) class BooleanType(wtypes.UserType): """A simple boolean type.""" basetype = wtypes.text name = 'boolean' @staticmethod def validate(value): try: return strutils.bool_from_string(value, strict=True) except ValueError as e: # raise Invalid to return 400 (BadRequest) in the API raise exception.Invalid(e) @staticmethod def frombasetype(value): if value is None: return None return BooleanType.validate(value) class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = wtypes.text name = 'json' def __str__(self): # These are the json serializable native types return ' | '.join(map(str, (wtypes.text, int, float, BooleanType, list, dict, None))) @staticmethod def validate(value): try: jsonutils.dumps(value, default=None) except TypeError: raise exception.Invalid(_('%s is not JSON serializable') % value) else: return value @staticmethod def frombasetype(value): return JsonType.validate(value) uuid = UuidType() boolean = BooleanType() jsontype = JsonType() class MultiType(wtypes.UserType): """A complex type that represents one or more types. Used for validating that a value is an instance of one of the types. :param types: Variable-length list of types. """ def __init__(self, *types): self.types = types def __str__(self): return ' | '.join(map(str, self.types)) def validate(self, value): for t in self.types: if t is wsme.types.text and isinstance(value, wsme.types.bytes): value = value.decode() if isinstance(value, t): return value else: raise ValueError( _("Wrong type. 
Expected '%(type)s', got '%(value)s'") % {'type': self.types, 'value': type(value)}) class JsonPatchType(wtypes.Base): """A complex type that represents a single json-patch operation.""" path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'), mandatory=True) op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), mandatory=True) value = wsme.wsattr(jsontype, default=wtypes.Unset) @staticmethod def internal_attrs(): """Returns a list of internal attributes. Internal attributes can't be added, replaced or removed. This method may be overwritten by derived class. """ return ['/created_at', '/id', '/links', '/updated_at', '/deleted_at', '/uuid'] @staticmethod def mandatory_attrs(): """Returns a list of mandatory attributes. Mandatory attributes can't be removed from the document. This method should be overwritten by derived class. """ return [] @staticmethod def validate(patch): _path = '/{0}'.format(patch.path.split('/')[1]) if _path in patch.internal_attrs(): msg = _("'%s' is an internal attribute and can not be updated") raise wsme.exc.ClientSideError(msg % patch.path) if patch.path in patch.mandatory_attrs() and patch.op == 'remove': msg = _("'%s' is a mandatory attribute and can not be removed") raise wsme.exc.ClientSideError(msg % patch.path) if patch.op != 'remove': if patch.value is wsme.Unset: msg = _("'add' and 'replace' operations need a value") raise wsme.exc.ClientSideError(msg) ret = {'path': patch.path, 'op': patch.op} if patch.value is not wsme.Unset: ret['value'] = patch.value return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/utils.py0000664000175000017500000001374000000000000023476 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from operator import attrgetter import jsonpatch from oslo_config import cfg from oslo_utils import reflection from oslo_utils import uuidutils import pecan import wsme from watcher._i18n import _ from watcher.api.controllers.v1 import versions from watcher.common import utils from watcher import objects CONF = cfg.CONF JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, KeyError) def validate_limit(limit): if limit is None: return CONF.api.max_limit if limit <= 0: # Case where we don't have a valid limit value raise wsme.exc.ClientSideError(_("Limit must be positive")) if limit and not CONF.api.max_limit: # Case where we don't have an upper limit return limit return min(CONF.api.max_limit, limit) def validate_sort_dir(sort_dir): if sort_dir not in ['asc', 'desc']: raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. 
" "Acceptable values are " "'asc' or 'desc'") % sort_dir) def validate_sort_key(sort_key, allowed_fields): # Very lightweight validation for now if sort_key not in allowed_fields: raise wsme.exc.ClientSideError( _("Invalid sort key: %s") % sort_key) def validate_search_filters(filters, allowed_fields): # Very lightweight validation for now # todo: improve this (e.g. https://www.parse.com/docs/rest/guide/#queries) for filter_name in filters: if filter_name not in allowed_fields: raise wsme.exc.ClientSideError( _("Invalid filter: %s") % filter_name) def check_need_api_sort(sort_key, additional_fields): return sort_key in additional_fields def make_api_sort(sorting_list, sort_key, sort_dir): # First sort by uuid field, than sort by sort_key # sort() ensures stable sorting, so we could # make lexicographical sort reverse_direction = (sort_dir == 'desc') sorting_list.sort(key=attrgetter('uuid'), reverse=reverse_direction) sorting_list.sort(key=attrgetter(sort_key), reverse=reverse_direction) def apply_jsonpatch(doc, patch): for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: if p['path'].lstrip('/') not in doc: msg = _('Adding a new attribute (%s) to the root of ' ' the resource is not allowed') raise wsme.exc.ClientSideError(msg % p['path']) return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch)) def get_patch_value(patch, key): for p in patch: if p['op'] == 'replace' and p['path'] == '/%s' % key: return p['value'] def set_patch_value(patch, key, value): for p in patch: if p['op'] == 'replace' and p['path'] == '/%s' % key: p['value'] = value def get_patch_key(patch, key): for p in patch: if p['op'] == 'replace' and key in p.keys(): return p[key][1:] def check_audit_state_transition(patch, initial): is_transition_valid = True state_value = get_patch_value(patch, "state") if state_value is not None: is_transition_valid = objects.audit.AuditStateTransitionManager( ).check_transition(initial, state_value) return is_transition_valid def as_filters_dict(**filters): filters_dict = {} for filter_name, filter_value in filters.items(): if filter_value: filters_dict[filter_name] = filter_value return filters_dict def get_resource(resource, resource_id, eager=False): """Get the resource from the uuid, id or logical name. :param resource: the resource type. :param resource_id: the UUID, ID or logical name of the resource. :returns: The resource. """ resource = getattr(objects, resource) _get = None if utils.is_int_like(resource_id): resource_id = int(resource_id) _get = resource.get elif uuidutils.is_uuid_like(resource_id): _get = resource.get_by_uuid else: _get = resource.get_by_name method_signature = reflection.get_signature(_get) if 'eager' in method_signature.parameters: return _get(pecan.request.context, resource_id, eager=eager) return _get(pecan.request.context, resource_id) def allow_start_end_audit_time(): """Check if we should support optional start/end attributes for Audit. Version 1.1 of the API added support for start and end time of continuous audits. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_1_START_END_TIMING.value) def allow_force(): """Check if we should support optional force attribute for Audit. Version 1.2 of the API added support for forced audits that allows to launch audit when other action plan is ongoing. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_2_FORCE.value) def allow_list_datamodel(): """Check if we should support list data model API. Version 1.3 of the API added support to list data model. 
""" return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_3_DATAMODEL.value) def allow_webhook_api(): """Check if we should support webhook API. Version 1.4 of the API added support to trigger webhook. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_4_WEBHOOK_API.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/versions.py0000664000175000017500000000344300000000000024205 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel Corporation # Copyright (c) 2018 SBCloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum class VERSIONS(enum.Enum): MINOR_0_ROCKY = 0 # v1.0: corresponds to Rocky API MINOR_1_START_END_TIMING = 1 # v1.1: Add start/end timei for audit MINOR_2_FORCE = 2 # v1.2: Add force field to audit MINOR_3_DATAMODEL = 3 # v1.3: Add list datamodel API MINOR_4_WEBHOOK_API = 4 # v1.4: Add webhook trigger API MINOR_MAX_VERSION = 4 # This is the version 1 API BASE_VERSION = 1 # String representations of the minor and maximum versions _MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, VERSIONS.MINOR_0_ROCKY.value) _MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, VERSIONS.MINOR_MAX_VERSION.value) def service_type_string(): return 'infra-optim' def min_version_string(): """Returns the minimum supported API version (as a string)""" return _MIN_VERSION_STRING def max_version_string(): """Returns the maximum supported API version (as a string). If the service is pinned, the maximum API version is the pinned version. Otherwise, it is the maximum supported API version. """ return _MAX_VERSION_STRING ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/controllers/v1/webhooks.py0000664000175000017500000000425000000000000024153 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Webhook endpoint for Watcher v1 REST API. 
""" from http import HTTPStatus from oslo_log import log import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils from watcher.common import exception from watcher.decision_engine import rpcapi from watcher import objects LOG = log.getLogger(__name__) class WebhookController(rest.RestController): """REST controller for webhooks resource.""" def __init__(self): super(WebhookController, self).__init__() self.dc_client = rpcapi.DecisionEngineAPI() @wsme_pecan.wsexpose(None, wtypes.text, body=types.jsontype, status_code=HTTPStatus.ACCEPTED) def post(self, audit_ident, body): """Trigger the given audit. :param audit_ident: UUID or name of an audit. """ LOG.debug("Webhook trigger Audit: %s.", audit_ident) context = pecan.request.context audit = utils.get_resource('Audit', audit_ident) if audit is None: raise exception.AuditNotFound(audit=audit_ident) if audit.audit_type != objects.audit.AuditType.EVENT.value: raise exception.AuditTypeNotAllowed(audit_type=audit.audit_type) allowed_state = ( objects.audit.State.PENDING, objects.audit.State.SUCCEEDED, ) if audit.state not in allowed_state: raise exception.AuditStateNotAllowed(state=audit.state) # trigger decision-engine to run the audit self.dc_client.trigger_audit(context, audit.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/hooks.py0000664000175000017500000000762100000000000020566 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from oslo_config import cfg from pecan import hooks from watcher.common import context class ContextHook(hooks.PecanHook): """Configures a request context and attaches it to the request. The following HTTP request headers are used: X-User: Used for context.user. X-User-Id: Used for context.user_id. X-Project-Name: Used for context.project. X-Project-Id: Used for context.project_id. X-Auth-Token: Used for context.auth_token. 
""" def before(self, state): headers = state.request.headers user = headers.get('X-User') user_id = headers.get('X-User-Id') project = headers.get('X-Project-Name') project_id = headers.get('X-Project-Id') domain_id = headers.get('X-User-Domain-Id') domain_name = headers.get('X-User-Domain-Name') auth_token = headers.get('X-Storage-Token') auth_token = headers.get('X-Auth-Token', auth_token) show_deleted = headers.get('X-Show-Deleted') auth_token_info = state.request.environ.get('keystone.token_info') roles = (headers.get('X-Roles', None) and headers.get('X-Roles').split(',')) state.request.context = context.make_context( auth_token=auth_token, auth_token_info=auth_token_info, user=user, user_id=user_id, project=project, project_id=project_id, domain_id=domain_id, domain_name=domain_name, show_deleted=show_deleted, roles=roles) class NoExceptionTracebackHook(hooks.PecanHook): """Workaround rpc.common: deserialize_remote_exception. deserialize_remote_exception builds rpc exception traceback into error message which is then sent to the client. Such behavior is a security concern so this hook is aimed to cut-off traceback from the error message. """ # NOTE(max_lobur): 'after' hook used instead of 'on_error' because # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator # catches and handles all the errors, so 'on_error' dedicated for unhandled # exceptions never fired. def after(self, state): # Omit empty body. Some errors may not have body at this level yet. if not state.response.body: return # Do nothing if there is no error. # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not # an error. if (HTTPStatus.OK <= state.response.status_int < HTTPStatus.BAD_REQUEST): return json_body = state.response.json # Do not remove traceback when traceback config is set if cfg.CONF.debug: return faultstring = json_body.get('faultstring') traceback_marker = 'Traceback (most recent call last):' if faultstring and traceback_marker in faultstring: # Cut-off traceback. faultstring = faultstring.split(traceback_marker, 1)[0] # Remove trailing newlines and spaces if any. json_body['faultstring'] = faultstring.rstrip() # Replace the whole json. Cannot change original one because it's # generated on the fly. state.response.json = json_body ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/api/middleware/0000775000175000017500000000000000000000000021200 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/middleware/__init__.py0000664000175000017500000000000000000000000023277 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/middleware/auth_token.py0000664000175000017500000000407100000000000023715 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import re from oslo_log import log from keystonemiddleware import auth_token from watcher._i18n import _ from watcher.common import exception from watcher.common import utils LOG = log.getLogger(__name__) class AuthTokenMiddleware(auth_token.AuthProtocol): """A wrapper on Keystone auth_token middleware. Does not perform verification of authentication tokens for public routes in the API. """ def __init__(self, app, conf, public_api_routes=()): route_pattern_tpl = r'%s(\.json|\.xml)?$' try: self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) for route_tpl in public_api_routes] except re.error as e: LOG.exception(e) raise exception.ConfigInvalid( error_msg=_('Cannot compile public API routes')) super(AuthTokenMiddleware, self).__init__(app, conf) def __call__(self, env, start_response): path = utils.safe_rstrip(env.get('PATH_INFO'), '/') # The information whether the API call is being performed against the # public API is required for some other components. Saving it to the # WSGI environment is reasonable thereby. env['is_public_api'] = any(re.match(pattern, path) for pattern in self.public_api_routes) if env['is_public_api']: return self._app(env, start_response) return super(AuthTokenMiddleware, self).__call__(env, start_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/middleware/parsable_error.py0000664000175000017500000000734600000000000024566 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ from xml import etree as et from oslo_log import log from oslo_serialization import jsonutils import webob from watcher._i18n import _ LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(_( 'ErrorDocumentMiddleware received an invalid ' 'status %s') % status) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type')] # Save the headers in case we need to modify them. 
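# They are re-appended in the error path below, once the rebuilt
# error body (and hence its final Content-Length) is known.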
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) if ( req.accept.best_match( ['application/json', 'application/xml']) == 'application/xml' ): try: # simple check xml is valid body = [ et.ElementTree.tostring( et.ElementTree.Element( 'error_message', text='\n'.join(app_iter)))] except et.ElementTree.ParseError as err: LOG.error('Error parsing HTTP response: %s', err) body = ['<error_message>%s</error_message>' % state['status_code']] state['headers'].append(('Content-Type', 'application/xml')) else: app_iter = [i.decode('utf-8') for i in app_iter] body = [jsonutils.dumps( {'error_message': '\n'.join(app_iter)})] body = [item.encode('utf-8') for item in body] state['headers'].append(('Content-Type', 'application/json')) state['headers'].append(('Content-Length', str(len(body[0])))) else: body = app_iter return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/scheduling.py0000664000175000017500000001224300000000000021564 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import itertools from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from watcher.common import context as watcher_context from watcher.common import scheduling from watcher import notifications from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) class APISchedulingService(scheduling.BackgroundSchedulerService): def __init__(self, gconfig={}, **options): self.services_status = {} super(APISchedulingService, self).__init__(gconfig, **options) def get_services_status(self, context): services = objects.service.Service.list(context) active_s = objects.service.ServiceStatus.ACTIVE failed_s = objects.service.ServiceStatus.FAILED for service in services: result = self.get_service_status(context, service.id) if service.id not in self.services_status: self.services_status[service.id] = result continue if self.services_status[service.id] != result: self.services_status[service.id] = result notifications.service.send_service_update(context, service, state=result) if (result == failed_s) and ( service.name == 'watcher-decision-engine'): audit_filters = { 'audit_type': objects.audit.AuditType.CONTINUOUS.value, 'state': objects.audit.State.ONGOING, 'hostname': service.host } ongoing_audits = objects.Audit.list( context, filters=audit_filters, eager=True) alive_services = [ s.host for s in services if (self.services_status[s.id] == active_s and s.name == 'watcher-decision-engine')] round_robin = itertools.cycle(alive_services) for audit in ongoing_audits: audit.hostname = round_robin.__next__() audit.save() LOG.info('Audit %(audit)s has been migrated to ' '%(host)s since %(failed_host)s is in' ' %(state)s', {'audit': audit.uuid, 'host': audit.hostname, 'failed_host': service.host, 'state': failed_s}) def get_service_status(self, context, service_id): service = objects.Service.get(context, service_id) last_heartbeat = (service.last_seen_up or service.updated_at or service.created_at) if isinstance(last_heartbeat, str): # NOTE(russellb) If this service came in over rpc via # conductor, then the timestamp will be a string and needs to be # converted back to a datetime. last_heartbeat = timeutils.parse_strtime(last_heartbeat) else: # Objects have proper UTC timezones, but the timeutils comparison # below does not (and will fail) last_heartbeat = last_heartbeat.replace(tzinfo=None) elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) is_up = abs(elapsed) <= CONF.service_down_time if not is_up: LOG.warning('Seems service %(name)s on host %(host)s is down. ' 'Last heartbeat was %(lhb)s. Elapsed time is %(el)s', {'name': service.name, 'host': service.host, 'lhb': str(last_heartbeat), 'el': str(elapsed)}) return objects.service.ServiceStatus.FAILED return objects.service.ServiceStatus.ACTIVE def start(self): """Start service.""" context = watcher_context.make_context(is_admin=True) self.add_job(self.get_services_status, name='service_status', trigger='interval', jobstore='default', args=[context], next_run_time=datetime.datetime.now(), seconds=CONF.periodic_interval) super(APISchedulingService, self).start() def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. 
""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/api/wsgi.py0000664000175000017500000000236500000000000020414 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI script for Watcher API, installed by pbr.""" import sys from oslo_config import cfg import oslo_i18n as i18n from oslo_log import log from watcher.api import app from watcher.common import service CONF = cfg.CONF LOG = log.getLogger(__name__) def initialize_wsgi_app(show_deprecated=False): i18n.install('watcher') service.prepare_service(sys.argv) LOG.debug("Configuration:") CONF.log_opt_values(LOG, log.DEBUG) if show_deprecated: LOG.warning("Using watcher/api/app.wsgi is deprecated and it will " "be removed in U release. Please use automatically " "generated watcher-api-wsgi instead.") return app.VersionSelectorApplication() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/applier/0000775000175000017500000000000000000000000017746 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/__init__.py0000664000175000017500000000000000000000000022045 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6231353 python_watcher-14.0.0/watcher/applier/action_plan/0000775000175000017500000000000000000000000022235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/action_plan/__init__.py0000664000175000017500000000000000000000000024334 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/action_plan/base.py0000664000175000017500000000146400000000000023526 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc class BaseActionPlanHandler(object, metaclass=abc.ABCMeta): @abc.abstractmethod def execute(self): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/action_plan/default.py0000664000175000017500000001027600000000000024241 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher.applier.action_plan import base from watcher.applier import default from watcher.common import exception from watcher import notifications from watcher import objects from watcher.objects import fields CONF = cfg.CONF LOG = log.getLogger(__name__) class DefaultActionPlanHandler(base.BaseActionPlanHandler): def __init__(self, context, service, action_plan_uuid): super(DefaultActionPlanHandler, self).__init__() self.ctx = context self.service = service self.action_plan_uuid = action_plan_uuid def execute(self): try: action_plan = objects.ActionPlan.get_by_uuid( self.ctx, self.action_plan_uuid, eager=True) if action_plan.state == objects.action_plan.State.CANCELLED: self._update_action_from_pending_to_cancelled() return action_plan.hostname = CONF.host action_plan.state = objects.action_plan.State.ONGOING action_plan.save() notifications.action_plan.send_action_notification( self.ctx, action_plan, action=fields.NotificationAction.EXECUTION, phase=fields.NotificationPhase.START) applier = default.DefaultApplier(self.ctx, self.service) applier.execute(self.action_plan_uuid) action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() notifications.action_plan.send_action_notification( self.ctx, action_plan, action=fields.NotificationAction.EXECUTION, phase=fields.NotificationPhase.END) except exception.ActionPlanCancelled as e: LOG.exception(e) action_plan.state = objects.action_plan.State.CANCELLED self._update_action_from_pending_to_cancelled() action_plan.save() notifications.action_plan.send_cancel_notification( self.ctx, action_plan, action=fields.NotificationAction.CANCEL, phase=fields.NotificationPhase.END) except Exception as e: LOG.exception(e) action_plan = objects.ActionPlan.get_by_uuid( self.ctx, self.action_plan_uuid, eager=True) if action_plan.state == objects.action_plan.State.CANCELLING: action_plan.state = objects.action_plan.State.FAILED action_plan.save() notifications.action_plan.send_cancel_notification( self.ctx, action_plan, action=fields.NotificationAction.CANCEL, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) else: action_plan.state = objects.action_plan.State.FAILED action_plan.save() notifications.action_plan.send_action_notification( self.ctx, action_plan, action=fields.NotificationAction.EXECUTION, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) def _update_action_from_pending_to_cancelled(self): filters = 
{'action_plan_uuid': self.action_plan_uuid, 'state': objects.action.State.PENDING} actions = objects.Action.list(self.ctx, filters=filters, eager=True) if actions: for a in actions: a.state = objects.action.State.CANCELLED a.save() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/applier/actions/0000775000175000017500000000000000000000000021406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/__init__.py0000664000175000017500000000000000000000000023505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/base.py0000664000175000017500000001143000000000000022671 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import jsonschema from watcher.common import clients from watcher.common.loader import loadable class BaseAction(loadable.Loadable, metaclass=abc.ABCMeta): # NOTE(jed): by convention we decided # that the attribute "resource_id" is the unique id of # the resource to which the Action applies to allow us to use it in the # watcher dashboard and will be nested in input_parameters RESOURCE_ID = 'resource_id' # Add action class name to the list, if implementing abort. ABORT_TRUE = ['Sleep', 'Nop'] def __init__(self, config, osc=None): """Constructor :param config: A mapping containing the configuration of this action :type config: dict :param osc: an OpenStackClients instance, defaults to None :type osc: :py:class:`~.OpenStackClients` instance, optional """ super(BaseAction, self).__init__(config) self._input_parameters = {} self._osc = osc @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @property def input_parameters(self): return self._input_parameters @input_parameters.setter def input_parameters(self, p): self._input_parameters = p @property def resource_id(self): return self.input_parameters[self.RESOURCE_ID] @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @abc.abstractmethod def execute(self): """Executes the main logic of the action This method can be used to perform an action on a given set of input parameters to accomplish some type of operation. This operation may return a boolean value as a result of its execution. If False, this will be considered as an error and will then trigger the reverting of the actions. 
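For example, a migration action that could not move its server would typically return False, and the previously applied actions would then be reverted via :py:meth:`~.BaseAction.revert`.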
:returns: A flag indicating whether or not the action succeeded :rtype: bool """ raise NotImplementedError() @abc.abstractmethod def revert(self): """Revert this action This method should rollback the resource to its initial state in the event of a faulty execution. This happens when the action raised an exception during its :py:meth:`~.BaseAction.execute`. """ raise NotImplementedError() @abc.abstractmethod def pre_condition(self): """Hook: called before the execution of an action This method can be used to perform some initializations or to make some more advanced validation on its input parameters. So if you wish to block its execution based on this factor, `raise` the related exception. """ raise NotImplementedError() @abc.abstractmethod def post_condition(self): """Hook: called after the execution of an action This function is called regardless of whether an action succeeded or not. So you can use it to perform cleanup operations. """ raise NotImplementedError() @property @abc.abstractmethod def schema(self): """Defines a Schema that the input parameters shall comply to :returns: A schema declaring the input parameters this action should be provided along with their respective constraints :rtype: :py:class:`jsonschema.Schema` instance """ raise NotImplementedError() def validate_parameters(self): jsonschema.validate(self.input_parameters, self.schema) return True @abc.abstractmethod def get_description(self): """Description of the action""" raise NotImplementedError() def check_abort(self): if self.__class__.__name__ == 'Migrate': if self.migration_type == self.LIVE_MIGRATION: return True else: return False else: return bool(self.__class__.__name__ in self.ABORT_TRUE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/change_node_power_state.py0000664000175000017500000001026200000000000026627 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Authors: Li Canwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from oslo_log import log from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception from watcher.common.metal_helper import constants as metal_constants from watcher.common.metal_helper import factory as metal_helper_factory LOG = log.getLogger(__name__) class ChangeNodePowerState(base.BaseAction): """Compute node power on/off By using this action, you will be able to on/off the power of a compute node. The action schema is:: schema = Schema({ 'resource_id': str, 'state': str, }) The `resource_id` references a baremetal node id (list of available ironic nodes is returned by this command: ``ironic node-list``). The `state` value should either be `on` or `off`. 
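For example, a valid set of input parameters would be (illustrative node UUID)::

    {
        'resource_id': '8e54b2f1-6d2c-4d1a-9f0e-3b5c7a1d2e40',
        'state': 'off'
    }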
""" STATE = 'state' @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', "minlength": 1 }, 'resource_name': { 'type': 'string', "minlength": 1 }, 'state': { 'type': 'string', 'enum': [metal_constants.PowerState.ON.value, metal_constants.PowerState.OFF.value] } }, 'required': ['resource_id', 'state'], 'additionalProperties': False, } @property def node_uuid(self): return self.resource_id @property def state(self): return self.input_parameters.get(self.STATE) def execute(self): target_state = self.state return self._node_manage_power(target_state) def revert(self): if self.state == metal_constants.PowerState.ON.value: target_state = metal_constants.PowerState.OFF.value elif self.state == metal_constants.PowerState.OFF.value: target_state = metal_constants.PowerState.ON.value return self._node_manage_power(target_state) def _node_manage_power(self, state, retry=60): if state is None: raise exception.IllegalArgumentException( message=_("The target state is not defined")) metal_helper = metal_helper_factory.get_helper(self.osc) node = metal_helper.get_node(self.node_uuid) current_state = node.get_power_state() if state == current_state.value: return True if state == metal_constants.PowerState.OFF.value: compute_node = node.get_hypervisor_node().to_dict() if (compute_node['running_vms'] == 0): node.set_power_state(state) else: LOG.warning( "Compute node %s has %s running vms and will " "NOT be shut off.", compute_node["hypervisor_hostname"], compute_node['running_vms']) return False else: node.set_power_state(state) node = metal_helper.get_node(self.node_uuid) while node.get_power_state() == current_state and retry: time.sleep(10) retry -= 1 node = metal_helper.get_node(self.node_uuid) if retry > 0: return True else: return False def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return ("Compute node power on/off through Ironic or MaaS.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/change_nova_service_state.py0000664000175000017500000001060400000000000027151 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.model import element class ChangeNovaServiceState(base.BaseAction): """Disables or enables the nova-compute service, deployed on a host By using this action, you will be able to update the state of a nova-compute service. A disabled nova-compute service can not be selected by the nova scheduler for future deployment of server. 
The action schema is:: schema = Schema({ 'resource_id': str, 'state': str, 'disabled_reason': str, }) The `resource_id` references a nova-compute service name (list of available nova-compute services is returned by this command: ``nova service-list --binary nova-compute``). The `state` value should either be `ONLINE` or `OFFLINE`. The `disabled_reason` references the reason why Watcher disables this nova-compute service. The value should be with `watcher_` prefix, such as `watcher_disabled`, `watcher_maintaining`. """ STATE = 'state' REASON = 'disabled_reason' RESOURCE_NAME = 'resource_name' @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', "minlength": 1 }, 'resource_name': { 'type': 'string', "minlength": 1 }, 'state': { 'type': 'string', 'enum': [element.ServiceState.ONLINE.value, element.ServiceState.OFFLINE.value, element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] }, 'disabled_reason': { 'type': 'string', "minlength": 1 } }, 'required': ['resource_id', 'state'], 'additionalProperties': False, } @property def host(self): return self.input_parameters.get(self.RESOURCE_NAME) @property def state(self): return self.input_parameters.get(self.STATE) @property def reason(self): return self.input_parameters.get(self.REASON) def execute(self): target_state = None if self.state == element.ServiceState.DISABLED.value: target_state = False elif self.state == element.ServiceState.ENABLED.value: target_state = True return self._nova_manage_service(target_state) def revert(self): target_state = None if self.state == element.ServiceState.DISABLED.value: target_state = True elif self.state == element.ServiceState.ENABLED.value: target_state = False return self._nova_manage_service(target_state) def _nova_manage_service(self, state): if state is None: raise exception.IllegalArgumentException( message=_("The target state is not defined")) nova = nova_helper.NovaHelper(osc=self.osc) if state is True: return nova.enable_service_nova_compute(self.host) else: return nova.disable_service_nova_compute(self.host, self.reason) def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return ("Disables or enables the nova-compute service." "A disabled nova-compute service can not be selected " "by the nova for future deployment of new server.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/factory.py0000664000175000017500000000304000000000000023424 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from oslo_log import log from watcher.applier.loading import default LOG = log.getLogger(__name__) class ActionFactory(object): def __init__(self): self.action_loader = default.DefaultActionLoader() def make_action(self, object_action, osc=None): LOG.debug("Creating instance of %s", object_action.action_type) loaded_action = self.action_loader.load(name=object_action.action_type, osc=osc) loaded_action.input_parameters = object_action.input_parameters LOG.debug("Checking the input parameters") # NOTE(jed) if we change the schema of an action and we try to reload # an older version of the Action, the validation can fail. # We need to add the versioning of an Action or a migration tool. # We can also create a new Action which extends the previous one. loaded_action.validate_parameters() return loaded_action ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/migration.py0000664000175000017500000001746500000000000023756 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception from watcher.common import nova_helper LOG = log.getLogger(__name__) class Migrate(base.BaseAction): """Migrates a server to a destination nova-compute host This action will allow you to migrate a server to another compute destination host. Migration type 'live' can only be used for migrating active VMs. Migration type 'cold' can be used for migrating non-active VMs as well as active VMs, which will be shut down while migrating. The action schema is:: schema = Schema({ 'resource_id': str, # should be a UUID 'migration_type': str, # choices -> "live", "cold" 'destination_node': str, 'source_node': str, }) The `resource_id` is the UUID of the server to migrate. The `source_node` and `destination_node` parameters are respectively the source and the destination compute hostname (list of available compute hosts is returned by this command: ``nova service-list --binary nova-compute``). .. note:: Nova API version must be 2.56 or above if `destination_node` parameter is given. 
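For example, a live migration would use input parameters such as (illustrative values)::

    {
        'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff',
        'migration_type': 'live',
        'source_node': 'compute1-host',
        'destination_node': 'compute2-host'
    }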
""" # input parameters constants MIGRATION_TYPE = 'migration_type' LIVE_MIGRATION = 'live' COLD_MIGRATION = 'cold' DESTINATION_NODE = 'destination_node' SOURCE_NODE = 'source_node' @property def schema(self): return { 'type': 'object', 'properties': { 'destination_node': { "anyof": [ {'type': 'string', "minLength": 1}, {'type': 'None'} ] }, 'migration_type': { 'type': 'string', "enum": ["live", "cold"] }, 'resource_id': { 'type': 'string', "minlength": 1, "pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-" "([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-" "([a-fA-F0-9]){12}$") }, 'resource_name': { 'type': 'string', "minlength": 1 }, 'source_node': { 'type': 'string', "minLength": 1 } }, 'required': ['migration_type', 'resource_id', 'source_node'], 'additionalProperties': False, } @property def instance_uuid(self): return self.resource_id @property def migration_type(self): return self.input_parameters.get(self.MIGRATION_TYPE) @property def destination_node(self): return self.input_parameters.get(self.DESTINATION_NODE) @property def source_node(self): return self.input_parameters.get(self.SOURCE_NODE) def _live_migrate_instance(self, nova, destination): result = None try: result = nova.live_migrate_instance(instance_id=self.instance_uuid, dest_hostname=destination) except nova_helper.nvexceptions.ClientException as e: LOG.debug("Nova client exception occurred while live " "migrating instance " "%(instance)s.Exception: %(exception)s", {'instance': self.instance_uuid, 'exception': e}) except Exception as e: LOG.exception(e) LOG.critical("Unexpected error occurred. Migration failed for " "instance %s. Leaving instance on previous " "host.", self.instance_uuid) return result def _cold_migrate_instance(self, nova, destination): result = None try: result = nova.watcher_non_live_migrate_instance( instance_id=self.instance_uuid, dest_hostname=destination) except Exception as exc: LOG.exception(exc) LOG.critical("Unexpected error occurred. Migration failed for " "instance %s. 
Leaving instance on previous " "host.", self.instance_uuid) return result def _abort_cold_migrate(self, nova): # TODO(adisky): currently watcher uses its own version of cold migrate # implement cold migrate using nova dependent on the blueprint # https://blueprints.launchpad.net/nova/+spec/cold-migration-with-target # Abort operation for cold migrate is dependent on blueprint # https://blueprints.launchpad.net/nova/+spec/abort-cold-migration LOG.warning("Abort operation for cold migration is not implemented") def _abort_live_migrate(self, nova, source, destination): return nova.abort_live_migrate(instance_id=self.instance_uuid, source=source, destination=destination) def migrate(self, destination=None): nova = nova_helper.NovaHelper(osc=self.osc) if destination is None: LOG.debug("Migrating instance %s, destination node will be " "determined by nova-scheduler", self.instance_uuid) else: LOG.debug("Migrate instance %s to %s", self.instance_uuid, destination) instance = nova.find_instance(self.instance_uuid) if instance: if self.migration_type == self.LIVE_MIGRATION: return self._live_migrate_instance(nova, destination) elif self.migration_type == self.COLD_MIGRATION: return self._cold_migrate_instance(nova, destination) else: raise exception.Invalid( message=(_("Migration of type '%(migration_type)s' is not " "supported.") % {'migration_type': self.migration_type})) else: raise exception.InstanceNotFound(name=self.instance_uuid) def execute(self): return self.migrate(destination=self.destination_node) def revert(self): return self.migrate(destination=self.source_node) def abort(self): nova = nova_helper.NovaHelper(osc=self.osc) instance = nova.find_instance(self.instance_uuid) if instance: if self.migration_type == self.COLD_MIGRATION: return self._abort_cold_migrate(nova) elif self.migration_type == self.LIVE_MIGRATION: return self._abort_live_migrate( nova, source=self.source_node, destination=self.destination_node) else: raise exception.InstanceNotFound(name=self.instance_uuid) def pre_condition(self): # TODO(jed): check if the instance exists / check if the instance is on # the source_node pass def post_condition(self): # TODO(jed): check extra parameters (network response, etc.) pass def get_description(self): """Description of the action""" return "Moving a VM instance from source_node to destination_node" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/nop.py0000664000175000017500000000352500000000000022561 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.applier.actions import base LOG = log.getLogger(__name__) class Nop(base.BaseAction): """logs a message The action schema is:: schema = Schema({ 'message': str, }) The `message` is the actual message that will be logged. 
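For example (illustrative)::

    {'message': 'Hello Watcher!'}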
""" MESSAGE = 'message' @property def schema(self): return { 'type': 'object', 'properties': { 'message': { 'type': ['string', 'null'] } }, 'required': ['message'], 'additionalProperties': False, } @property def message(self): return self.input_parameters.get(self.MESSAGE) def execute(self): LOG.debug("Executing action NOP message: %s ", self.message) return True def revert(self): LOG.debug("Revert action NOP") return True def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return "Logging a NOP message" def abort(self): LOG.debug("Abort action NOP") return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/resize.py0000664000175000017500000000646300000000000023272 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.applier.actions import base from watcher.common import nova_helper LOG = log.getLogger(__name__) class Resize(base.BaseAction): """Resizes a server with specified flavor. This action will allow you to resize a server to another flavor. The action schema is:: schema = Schema({ 'resource_id': str, # should be a UUID 'flavor': str, # should be either ID or Name of Flavor }) The `resource_id` is the UUID of the server to resize. The `flavor` is the ID or Name of Flavor (Nova accepts either ID or Name of Flavor to resize() function). """ # input parameters constants FLAVOR = 'flavor' @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', 'minlength': 1, 'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-' '([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-' '([a-fA-F0-9]){12}$') }, 'flavor': { 'type': 'string', 'minlength': 1, }, }, 'required': ['resource_id', 'flavor'], 'additionalProperties': False, } @property def instance_uuid(self): return self.resource_id @property def flavor(self): return self.input_parameters.get(self.FLAVOR) def resize(self): nova = nova_helper.NovaHelper(osc=self.osc) LOG.debug("Resize instance %s to %s flavor", self.instance_uuid, self.flavor) instance = nova.find_instance(self.instance_uuid) result = None if instance: try: result = nova.resize_instance( instance_id=self.instance_uuid, flavor=self.flavor) except Exception as exc: LOG.exception(exc) LOG.critical( "Unexpected error occurred. Resizing failed for " "instance %s.", self.instance_uuid) return result def execute(self): return self.resize() def revert(self): LOG.warning("revert not supported") def pre_condition(self): # TODO(jed): check if the instance exists / check if the instance is on # the source_node pass def post_condition(self): # TODO(jed): check extra parameters (network response, etc.) pass def get_description(self): """Description of the action""" return "Resize a server with specified flavor." 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/sleep.py0000664000175000017500000000375000000000000023075 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from oslo_log import log from watcher.applier.actions import base LOG = log.getLogger(__name__) class Sleep(base.BaseAction): """Makes the executor of the action plan wait for a given duration The action schema is:: schema = Schema({ 'duration': float, }) The `duration` is expressed in seconds. """ DURATION = 'duration' @property def schema(self): return { 'type': 'object', 'properties': { 'duration': { 'type': 'number', 'minimum': 0 }, }, 'required': ['duration'], 'additionalProperties': False, } @property def duration(self): return int(self.input_parameters.get(self.DURATION)) def execute(self): LOG.debug("Starting action sleep with duration: %s ", self.duration) time.sleep(self.duration) return True def revert(self): LOG.debug("Revert action sleep") return True def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return "Wait for a given interval in seconds." def abort(self): LOG.debug("Abort action sleep") return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/actions/volume_migration.py0000664000175000017500000002051300000000000025341 0ustar00zuulzuul00000000000000# Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import jsonschema from oslo_log import log from cinderclient import client as cinder_client from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import cinder_helper from watcher.common import exception from watcher.common import keystone_helper from watcher.common import nova_helper from watcher.common import utils from watcher import conf CONF = conf.CONF LOG = log.getLogger(__name__) class VolumeMigrate(base.BaseAction): """Migrates a volume to destination node or type By using this action, you will be able to migrate cinder volume. Migration type 'swap' can only be used for migrating attached volume. Migration type 'migrate' can be used for migrating detached volume to the pool of same volume type. Migration type 'retype' can be used for changing volume type of detached volume. 
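
    For example (illustrative values only; the volume UUID and the type
    name are hypothetical), swapping an attached volume to a different
    volume type could be requested with input parameters such as::

        {'resource_id': '8f7f9dde-6e0e-4646-8a27-5d8f23fbb0a6',
         'migration_type': 'swap',
         'destination_type': 'some-other-volume-type'}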
    The action schema is::

        schema = Schema({
            'resource_id': str,  # should be a UUID
            'migration_type': str,  # choices -> "swap", "migrate", "retype"
            'destination_node': str,
            'destination_type': str,
        })

    The `resource_id` is the UUID of the cinder volume to migrate.
    The `destination_node` is the destination block storage pool name
    (the list of available pools is returned by ``cinder get-pools``);
    it is mandatory when migrating a detached volume to a pool with the
    same volume type.
    The `destination_type` is the destination block storage type name
    (the list of available types is returned by ``cinder type-list``);
    it is mandatory when migrating a detached volume or swapping an
    attached volume to a different volume type.
    """

    MIGRATION_TYPE = 'migration_type'
    SWAP = 'swap'
    RETYPE = 'retype'
    MIGRATE = 'migrate'
    DESTINATION_NODE = "destination_node"
    DESTINATION_TYPE = "destination_type"

    def __init__(self, config, osc=None):
        super(VolumeMigrate, self).__init__(config)
        self.temp_username = utils.random_string(10)
        self.temp_password = utils.random_string(10)
        self.cinder_util = cinder_helper.CinderHelper(osc=self.osc)
        self.nova_util = nova_helper.NovaHelper(osc=self.osc)

    @property
    def schema(self):
        return {
            'type': 'object',
            'properties': {
                'resource_id': {
                    'type': 'string',
                    'minLength': 1,
                    'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-'
                                '([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-'
                                '([a-fA-F0-9]){12}$')
                },
                'resource_name': {
                    'type': 'string',
                    'minLength': 1
                },
                'migration_type': {
                    'type': 'string',
                    'enum': ["swap", "retype", "migrate"]
                },
                'destination_node': {
                    'anyOf': [
                        {'type': 'string', 'minLength': 1},
                        {'type': 'null'}
                    ]
                },
                'destination_type': {
                    'anyOf': [
                        {'type': 'string', 'minLength': 1},
                        {'type': 'null'}
                    ]
                }
            },
            'required': ['resource_id', 'migration_type'],
            'additionalProperties': False,
        }

    def validate_parameters(self):
        jsonschema.validate(self.input_parameters, self.schema)
        return True

    @property
    def volume_id(self):
        return self.input_parameters.get(self.RESOURCE_ID)

    @property
    def migration_type(self):
        return self.input_parameters.get(self.MIGRATION_TYPE)

    @property
    def destination_node(self):
        return self.input_parameters.get(self.DESTINATION_NODE)

    @property
    def destination_type(self):
        return self.input_parameters.get(self.DESTINATION_TYPE)

    def _can_swap(self, volume):
        """Check whether the volume can be swapped"""
        if not volume.attachments:
            return False
        instance_id = volume.attachments[0]['server_id']
        instance_status = self.nova_util.find_instance(instance_id).status

        if (volume.status == 'in-use' and
                instance_status in ('ACTIVE', 'PAUSED', 'RESIZED')):
            return True

        return False

    def _create_user(self, volume, user):
        """Create a user from the volume attributes and user information"""
        keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
        project_id = getattr(volume, 'os-vol-tenant-attr:tenant_id')
        user['project'] = project_id
        user['domain'] = keystone_util.get_project(project_id).domain_id
        user['roles'] = ['admin']
        return keystone_util.create_user(user)

    def _get_cinder_client(self, session):
        """Get a cinder client for the given session"""
        return cinder_client.Client(
            CONF.cinder_client.api_version,
            session=session,
            endpoint_type=CONF.cinder_client.endpoint_type)

    def _swap_volume(self, volume, dest_type):
        """Swap volume to dest_type

        Limitation note: only for compute libvirt driver
        """
        if not dest_type:
            raise exception.Invalid(
                message=(_("destination type is required when "
                           "migration type is swap")))

        if not self._can_swap(volume):
            raise exception.Invalid(
                message=(_("Invalid state for swapping volume")))

        user_info = {
            'name': self.temp_username,
            'password': self.temp_password}
        user = self._create_user(volume, user_info)
        keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
        try:
            session = keystone_util.create_session(
                user.id, self.temp_password)
            temp_cinder = self._get_cinder_client(session)

            # swap volume
            new_volume = self.cinder_util.create_volume(
                temp_cinder, volume, dest_type)
            self.nova_util.swap_volume(volume, new_volume)

            # delete old volume
            self.cinder_util.delete_volume(volume)
        finally:
            keystone_util.delete_user(user)

        return True

    def _migrate(self, volume_id, dest_node, dest_type):
        try:
            volume = self.cinder_util.get_volume(volume_id)
            if self.migration_type == self.SWAP:
                if dest_node:
                    LOG.warning("dest_node is ignored")
                return self._swap_volume(volume, dest_type)
            elif self.migration_type == self.RETYPE:
                return self.cinder_util.retype(volume, dest_type)
            elif self.migration_type == self.MIGRATE:
                return self.cinder_util.migrate(volume, dest_node)
            else:
                raise exception.Invalid(
                    message=(_("Migration of type '%(migration_type)s' is "
                               "not supported.") %
                             {'migration_type': self.migration_type}))
        except exception.Invalid as ei:
            LOG.exception(ei)
            return False
        except Exception as e:
            LOG.critical("Unexpected exception occurred.")
            LOG.exception(e)
            return False

    def execute(self):
        return self._migrate(self.volume_id, self.destination_node,
                             self.destination_type)

    def revert(self):
        LOG.warning("revert not supported")

    def abort(self):
        pass

    def pre_condition(self):
        pass

    def post_condition(self):
        pass

    def get_description(self):
        return "Moving a volume to destination_node or destination_type"
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/base.py0000664000175000017500000000206100000000000021231 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
This component is in charge of executing the
:ref:`Action Plan <action_plan_definition>` built by the
:ref:`Watcher Decision Engine <watcher_decision_engine_definition>`.

See: :doc:`../architecture` for more details on this component.
"""

import abc


class BaseApplier(object, metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def execute(self, action_plan_uuid):
        raise NotImplementedError()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/default.py0000664000175000017500000000405500000000000021750 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher.applier import base from watcher.applier.loading import default from watcher import objects LOG = log.getLogger(__name__) CONF = cfg.CONF class DefaultApplier(base.BaseApplier): def __init__(self, context, applier_manager): super(DefaultApplier, self).__init__() self._applier_manager = applier_manager self._loader = default.DefaultWorkFlowEngineLoader() self._engine = None self._context = context @property def context(self): return self._context @property def applier_manager(self): return self._applier_manager @property def engine(self): if self._engine is None: selected_workflow_engine = CONF.watcher_applier.workflow_engine LOG.debug("Loading workflow engine %s ", selected_workflow_engine) self._engine = self._loader.load( name=selected_workflow_engine, context=self.context, applier_manager=self.applier_manager) return self._engine def execute(self, action_plan_uuid): LOG.debug("Executing action plan %s ", action_plan_uuid) filters = {'action_plan_uuid': action_plan_uuid} actions = objects.Action.list(self.context, filters=filters, eager=True) return self.engine.execute(actions) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/applier/loading/0000775000175000017500000000000000000000000021363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/loading/__init__.py0000664000175000017500000000000000000000000023462 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/loading/default.py0000664000175000017500000000166200000000000023366 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.common.loader import default class DefaultWorkFlowEngineLoader(default.DefaultLoader): def __init__(self): super(DefaultWorkFlowEngineLoader, self).__init__( namespace='watcher_workflow_engines') class DefaultActionLoader(default.DefaultLoader): def __init__(self): super(DefaultActionLoader, self).__init__( namespace='watcher_actions') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/manager.py0000664000175000017500000000264600000000000021742 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from watcher.applier.messaging import trigger from watcher.common import service_manager from watcher import conf CONF = conf.CONF class ApplierManager(service_manager.ServiceManager): @property def service_name(self): return 'watcher-applier' @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_applier.publisher_id @property def conductor_topic(self): return CONF.watcher_applier.conductor_topic @property def notification_topics(self): return [] @property def conductor_endpoints(self): return [trigger.TriggerActionPlan] @property def notification_endpoints(self): return [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/applier/messaging/0000775000175000017500000000000000000000000021723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/messaging/__init__.py0000664000175000017500000000000000000000000024022 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/messaging/trigger.py0000664000175000017500000000330000000000000023734 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
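#
# NOTE: the ApplierManager defined in manager.py above lists the
# TriggerActionPlan class below among its conductor_endpoints, so the RPC
# server built for watcher-applier dispatches the 'launch_action_plan'
# casts sent by watcher.applier.rpcapi.ApplierAPI to this handler.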
# import futurist from oslo_config import cfg from oslo_log import log from watcher.applier.action_plan import default LOG = log.getLogger(__name__) CONF = cfg.CONF class TriggerActionPlan(object): def __init__(self, applier_manager): self.applier_manager = applier_manager workers = CONF.watcher_applier.workers self.executor = futurist.GreenThreadPoolExecutor(max_workers=workers) def do_launch_action_plan(self, context, action_plan_uuid): try: cmd = default.DefaultActionPlanHandler(context, self.applier_manager, action_plan_uuid) cmd.execute() except Exception as e: LOG.exception(e) def launch_action_plan(self, context, action_plan_uuid): LOG.debug("Trigger ActionPlan %s", action_plan_uuid) # submit self.executor.submit(self.do_launch_action_plan, context, action_plan_uuid) return action_plan_uuid ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/rpcapi.py0000664000175000017500000000354600000000000021606 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import service from watcher.common import service_manager from watcher.common import utils from watcher import conf CONF = conf.CONF class ApplierAPI(service.Service): def __init__(self): super(ApplierAPI, self).__init__(ApplierAPIManager) def launch_action_plan(self, context, action_plan_uuid=None): if not utils.is_uuid_like(action_plan_uuid): raise exception.InvalidUuidOrName(name=action_plan_uuid) self.conductor_client.cast( context, 'launch_action_plan', action_plan_uuid=action_plan_uuid) class ApplierAPIManager(service_manager.ServiceManager): @property def service_name(self): return None @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_applier.publisher_id @property def conductor_topic(self): return CONF.watcher_applier.conductor_topic @property def notification_topics(self): return [] @property def conductor_endpoints(self): return [] @property def notification_endpoints(self): return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/sync.py0000664000175000017500000000571400000000000021303 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
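#
# NOTE: watcher-applier runs the Syncer below once at service startup (see
# watcher/cmd/applier.py), so action descriptions are refreshed and any
# action plan left ONGOING on this host by a previous shutdown is cancelled
# before new work is accepted.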
# from oslo_config import cfg from oslo_log import log from watcher.applier.loading import default from watcher.common import context from watcher.common import exception from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) class Syncer(object): """Syncs all available actions with the Watcher DB""" def sync(self): ctx = context.make_context() action_loader = default.DefaultActionLoader() available_actions = action_loader.list_available() for action_type in available_actions.keys(): load_action = action_loader.load(action_type) load_description = load_action.get_description() try: action_desc = objects.ActionDescription.get_by_type( ctx, action_type) if action_desc.description != load_description: action_desc.description = load_description action_desc.save() except exception.ActionDescriptionNotFound: obj_action_desc = objects.ActionDescription(ctx) obj_action_desc.action_type = action_type obj_action_desc.description = load_description obj_action_desc.create() self._cancel_ongoing_actionplans(ctx) def _cancel_ongoing_actionplans(self, context): actions_plans = objects.ActionPlan.list( context, filters={'state': objects.action_plan.State.ONGOING, 'hostname': CONF.host}, eager=True) for ap in actions_plans: ap.state = objects.action_plan.State.CANCELLED ap.save() filters = {'action_plan_uuid': ap.uuid, 'state__in': (objects.action.State.PENDING, objects.action.State.ONGOING)} actions = objects.Action.list(context, filters=filters, eager=True) for a in actions: a.state = objects.action.State.CANCELLED a.save() LOG.info("Action Plan %(uuid)s along with appropriate Actions " "has been cancelled because it was in %(state)s state " "when Applier had been stopped on %(hostname)s host.", {'uuid': ap.uuid, 'state': objects.action_plan.State.ONGOING, 'hostname': ap.hostname}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/applier/workflow_engine/0000775000175000017500000000000000000000000023145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/workflow_engine/__init__.py0000664000175000017500000000000000000000000025244 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/workflow_engine/base.py0000664000175000017500000002752500000000000024444 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc import time import eventlet from oslo_log import log from taskflow import task as flow_task from watcher.applier.actions import factory from watcher.common import clients from watcher.common import exception from watcher.common.loader import loadable from watcher import notifications from watcher import objects from watcher.objects import fields LOG = log.getLogger(__name__) CANCEL_STATE = [objects.action_plan.State.CANCELLING, objects.action_plan.State.CANCELLED] class BaseWorkFlowEngine(loadable.Loadable, metaclass=abc.ABCMeta): def __init__(self, config, context=None, applier_manager=None): """Constructor :param config: A mapping containing the configuration of this workflow engine :type config: dict :param osc: an OpenStackClients object, defaults to None :type osc: :py:class:`~.OpenStackClients` instance, optional """ super(BaseWorkFlowEngine, self).__init__(config) self._context = context self._applier_manager = applier_manager self._action_factory = factory.ActionFactory() self._osc = None self._is_notified = False self.execution_rule = None @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @property def context(self): return self._context @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @property def applier_manager(self): return self._applier_manager @property def action_factory(self): return self._action_factory def notify(self, action, state): db_action = objects.Action.get_by_uuid(self.context, action.uuid, eager=True) db_action.state = state db_action.save() return db_action def notify_cancel_start(self, action_plan_uuid): action_plan = objects.ActionPlan.get_by_uuid(self.context, action_plan_uuid, eager=True) if not self._is_notified: self._is_notified = True notifications.action_plan.send_cancel_notification( self._context, action_plan, action=fields.NotificationAction.CANCEL, phase=fields.NotificationPhase.START) @abc.abstractmethod def execute(self, actions): raise NotImplementedError() class BaseTaskFlowActionContainer(flow_task.Task): def __init__(self, name, db_action, engine, **kwargs): super(BaseTaskFlowActionContainer, self).__init__(name=name) self._db_action = db_action self._engine = engine self.loaded_action = None @property def engine(self): return self._engine @property def action(self): if self.loaded_action is None: action = self.engine.action_factory.make_action( self._db_action, osc=self._engine.osc) self.loaded_action = action return self.loaded_action @abc.abstractmethod def do_pre_execute(self): raise NotImplementedError() @abc.abstractmethod def do_execute(self, *args, **kwargs): raise NotImplementedError() @abc.abstractmethod def do_post_execute(self): raise NotImplementedError() @abc.abstractmethod def do_revert(self): raise NotImplementedError() @abc.abstractmethod def do_abort(self, *args, **kwargs): raise NotImplementedError() # NOTE(alexchadin): taskflow does 3 method calls (pre_execute, execute, # post_execute) independently. We want to support notifications in base # class, so child's methods should be named with `do_` prefix and wrapped. def pre_execute(self): try: # NOTE(adisky): check the state of action plan before starting # next action, if action plan is cancelled raise the exceptions # so that taskflow does not schedule further actions. 
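            # The plan state below is re-read from the database rather than
            # cached, so a cancellation requested after this task was
            # scheduled is still observed here.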
action_plan = objects.ActionPlan.get_by_id( self.engine.context, self._db_action.action_plan_id) if action_plan.state in CANCEL_STATE: raise exception.ActionPlanCancelled(uuid=action_plan.uuid) db_action = self.do_pre_execute() notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.START) except exception.ActionPlanCancelled as e: LOG.exception(e) self.engine.notify_cancel_start(action_plan.uuid) raise except Exception as e: LOG.exception(e) db_action = self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) def execute(self, *args, **kwargs): def _do_execute_action(*args, **kwargs): try: db_action = self.do_execute(*args, **kwargs) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.END) except Exception as e: LOG.exception(e) LOG.error('The workflow engine has failed ' 'to execute the action: %s', self.name) db_action = self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) raise # NOTE: spawn a new thread for action execution, so that if action plan # is cancelled workflow engine will not wait to finish action execution et = eventlet.spawn(_do_execute_action, *args, **kwargs) # NOTE: check for the state of action plan periodically,so that if # action is finished or action plan is cancelled we can exit from here. result = False while True: action_object = objects.Action.get_by_uuid( self.engine.context, self._db_action.uuid, eager=True) action_plan_object = objects.ActionPlan.get_by_id( self.engine.context, action_object.action_plan_id) if action_object.state == objects.action.State.SUCCEEDED: result = True if (action_object.state in [objects.action.State.SUCCEEDED, objects.action.State.FAILED] or action_plan_object.state in CANCEL_STATE): break time.sleep(1) try: # NOTE: kill the action execution thread, if action plan is # cancelled for all other cases wait for the result from action # execution thread. # Not all actions support abort operations, kill only those action # which support abort operations abort = self.action.check_abort() if (action_plan_object.state in CANCEL_STATE and abort): et.kill() et.wait() return result # NOTE: catch the greenlet exit exception due to thread kill, # taskflow will call revert for the action, # we will redirect it to abort. 
except eventlet.greenlet.GreenletExit: self.engine.notify_cancel_start(action_plan_object.uuid) raise exception.ActionPlanCancelled(uuid=action_plan_object.uuid) except Exception as e: LOG.exception(e) # return False instead of raising an exception return False def post_execute(self): try: self.do_post_execute() except Exception as e: LOG.exception(e) db_action = self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) def revert(self, *args, **kwargs): action_plan = objects.ActionPlan.get_by_id( self.engine.context, self._db_action.action_plan_id, eager=True) # NOTE: check if revert cause by cancel action plan or # some other exception occurred during action plan execution # if due to some other exception keep the flow intact. if action_plan.state not in CANCEL_STATE: self.do_revert() return action_object = objects.Action.get_by_uuid( self.engine.context, self._db_action.uuid, eager=True) try: if action_object.state == objects.action.State.ONGOING: action_object.state = objects.action.State.CANCELLING action_object.save() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.START) action_object = self.abort() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.END) if action_object.state == objects.action.State.PENDING: notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.START) action_object.state = objects.action.State.CANCELLED action_object.save() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.END) except Exception as e: LOG.exception(e) action_object.state = objects.action.State.FAILED action_object.save() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) def abort(self, *args, **kwargs): return self.do_abort(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/applier/workflow_engine/default.py0000664000175000017500000001732300000000000025151 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
#

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from taskflow import engines
from taskflow import exceptions as tf_exception
from taskflow.patterns import graph_flow as gf
from taskflow import task as flow_task

from watcher.applier.workflow_engine import base
from watcher.common import exception
from watcher import conf
from watcher import objects

CONF = conf.CONF
LOG = log.getLogger(__name__)


class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
    """Taskflow as a workflow engine for Watcher

    Full documentation on taskflow at
    https://docs.openstack.org/taskflow/latest
    """

    def decider(self, history):
        # decider: a callback that taskflow invokes at runtime to decide
        # whether the target action v of a link should be allowed to execute
        # (or whether its execution should be ignored). It receives a single
        # keyword argument, history, holding the execution results of all
        # decidable links that have v as a target, and it must return a
        # single boolean (True to allow the execution of v, False to skip
        # it).
        LOG.info("decider history: %s", history)
        if history and self.execution_rule == 'ANY':
            return not list(history.values())[0]
        else:
            return True

    @classmethod
    def get_config_opts(cls):
        return [
            cfg.IntOpt(
                'max_workers',
                default=processutils.get_worker_count(),
                min=1,
                required=True,
                help='Number of workers for taskflow engine '
                     'to execute actions.'),
            cfg.DictOpt(
                'action_execution_rule',
                default={},
                help='The execution rule for linked actions. The key is a '
                     'strategy name and the value is the rule: ALWAYS '
                     'means all actions will be executed, while ANY means '
                     'that if the previous action executed successfully, '
                     'the next action will be ignored. Unset means '
                     'ALWAYS.')
        ]

    def get_execution_rule(self, actions):
        if actions:
            actionplan_object = objects.ActionPlan.get_by_id(
                self.context, actions[0].action_plan_id)
            strategy_object = objects.Strategy.get_by_id(
                self.context, actionplan_object.strategy_id)
            return self.config.action_execution_rule.get(
                strategy_object.name)

    def execute(self, actions):
        try:
            # NOTE(jed) We want a strong separation of concerns between the
            # Watcher planner and the Watcher Applier in order to keep the
            # possibility of supporting several workflow engines. We provide
            # the 'taskflow' engine by default although we still want to
            # leave users the possibility to change it.
            # The current implementation uses a graph with linked actions.
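            # For illustration (task names invented): a plan in which action
            # B lists action A among its parents is built below as
            #     flow.add(task_a, task_b)
            #     flow.link(task_a, task_b, decider=self.decider)
            # so taskflow starts task_b only once task_a has finished and,
            # under the 'ANY' execution rule, the decider skips task_b
            # entirely when task_a already succeeded.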
# todo(jed) add oslo conf for retry and name self.execution_rule = self.get_execution_rule(actions) flow = gf.Flow("watcher_flow") actions_uuid = {} for a in actions: task = TaskFlowActionContainer(a, self) flow.add(task) actions_uuid[a.uuid] = task for a in actions: for parent_id in a.parents: flow.link(actions_uuid[parent_id], actions_uuid[a.uuid], decider=self.decider) e = engines.load( flow, executor='greenthreaded', engine='parallel', max_workers=self.config.max_workers) e.run() return flow except exception.ActionPlanCancelled: raise except tf_exception.WrappedFailure as e: if e.check("watcher.common.exception.ActionPlanCancelled"): raise exception.ActionPlanCancelled else: raise exception.WorkflowExecutionException(error=e) except Exception as e: raise exception.WorkflowExecutionException(error=e) class TaskFlowActionContainer(base.BaseTaskFlowActionContainer): def __init__(self, db_action, engine): self.name = "action_type:{0} uuid:{1}".format(db_action.action_type, db_action.uuid) super(TaskFlowActionContainer, self).__init__(self.name, db_action, engine) def do_pre_execute(self): db_action = self.engine.notify(self._db_action, objects.action.State.ONGOING) LOG.debug("Pre-condition action: %s", self.name) self.action.pre_condition() return db_action def do_execute(self, *args, **kwargs): LOG.debug("Running action: %s", self.name) # NOTE:Some actions(such as migrate) will return None when exception # Only when True is returned, the action state is set to SUCCEEDED result = self.action.execute() if result is True: return self.engine.notify(self._db_action, objects.action.State.SUCCEEDED) else: self.engine.notify(self._db_action, objects.action.State.FAILED) raise exception.ActionExecutionFailure( action_id=self._db_action.uuid) def do_post_execute(self): LOG.debug("Post-condition action: %s", self.name) self.action.post_condition() def do_revert(self, *args, **kwargs): # NOTE: Not rollback action plan if not CONF.watcher_applier.rollback_when_actionplan_failed: LOG.info("Failed actionplan rollback option is turned off, and " "the following action will be skipped: %s", self.name) return LOG.warning("Revert action: %s", self.name) try: # TODO(jed): do we need to update the states in case of failure? self.action.revert() except Exception as e: LOG.exception(e) LOG.critical("Oops! We need a disaster recover plan.") def do_abort(self, *args, **kwargs): LOG.warning("Aborting action: %s", self.name) try: result = self.action.abort() if result: # Aborted the action. return self.engine.notify(self._db_action, objects.action.State.CANCELLED) else: return self.engine.notify(self._db_action, objects.action.State.SUCCEEDED) except Exception as e: LOG.exception(e) return self.engine.notify(self._db_action, objects.action.State.FAILED) class TaskFlowNop(flow_task.Task): """This class is used in case of the workflow have only one Action. We need at least two atoms to create a link. """ def execute(self): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/cmd/0000775000175000017500000000000000000000000017055 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/__init__.py0000664000175000017500000000303300000000000021165 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(licanwei): Do eventlet monkey patching here, instead of in # common/service.py. This allows the API service to run without monkey # patching under Apache (which uses its own concurrency model). Mixing # concurrency models can cause undefined behavior and potentially API timeouts. # NOTE(sean-k-mooney) while ^ is true, since that was written asyncio was added # to the code base in addition to apscheduler which provides native threads. # As such we have a lot of technical debt to fix with regards to watchers # concurrency model as we are mixing up to 3 models the same process. # apscheduler does not technically support eventlet but it has mostly worked # until now, apscheduler is used to provide a job schedulers which mixes # monkey patched and non monkey patched code in the same process. # That is problematic and can lead to errors on python 3.12+. # The maas support added asyncio to the codebase which is unsafe to mix # with eventlets by default. from watcher import eventlet eventlet.patch() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/api.py0000664000175000017500000000330500000000000020201 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for the Watcher API service.""" import sys from oslo_config import cfg from oslo_log import log from watcher.api import scheduling from watcher.common import service from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF def main(): service.prepare_service(sys.argv, CONF) host, port = cfg.CONF.api.host, cfg.CONF.api.port protocol = "http" if not CONF.api.enable_ssl_api else "https" # Build and start the WSGI app server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api) if host == '127.0.0.1': LOG.info('serving on 127.0.0.1:%(port)s, ' 'view at %(protocol)s://127.0.0.1:%(port)s', dict(protocol=protocol, port=port)) else: LOG.info('serving on %(protocol)s://%(host)s:%(port)s', dict(protocol=protocol, host=host, port=port)) api_schedule = scheduling.APISchedulingService() api_schedule.start() launcher = service.launch(CONF, server, workers=server.workers) launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/applier.py0000664000175000017500000000246700000000000021074 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for the Applier service.""" import os import sys from oslo_log import log from watcher.applier import manager from watcher.applier import sync from watcher.common import service as watcher_service from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF def main(): watcher_service.prepare_service(sys.argv, CONF) LOG.info('Starting Watcher Applier service in PID %s', os.getpid()) applier_service = watcher_service.Service(manager.ApplierManager) syncer = sync.Syncer() syncer.sync() # Only 1 process launcher = watcher_service.launch(CONF, applier_service) launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/dbmanage.py0000664000175000017500000001237100000000000021171 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Run storage database migration. """ import sys from oslo_config import cfg from watcher.common import service from watcher import conf from watcher.db import migration from watcher.db import purge CONF = conf.CONF class DBCommand(object): @staticmethod def upgrade(): migration.upgrade(CONF.command.revision) @staticmethod def downgrade(): migration.downgrade(CONF.command.revision) @staticmethod def revision(): migration.revision(CONF.command.message, CONF.command.autogenerate) @staticmethod def stamp(): migration.stamp(CONF.command.revision) @staticmethod def version(): print(migration.version()) @staticmethod def create_schema(): migration.create_schema() @staticmethod def purge(): purge.purge(CONF.command.age_in_days, CONF.command.max_number, CONF.command.goal, CONF.command.exclude_orphans, CONF.command.dry_run) def add_command_parsers(subparsers): parser = subparsers.add_parser( 'upgrade', help="Upgrade the database schema to the latest version. " "Optionally, use --revision to specify an alembic revision " "string to upgrade to.") parser.set_defaults(func=DBCommand.upgrade) parser.add_argument('--revision', nargs='?') parser = subparsers.add_parser( 'downgrade', help="Downgrade the database schema to the oldest revision. 
" "While optional, one should generally use --revision to " "specify the alembic revision string to downgrade to.") parser.set_defaults(func=DBCommand.downgrade) parser.add_argument('--revision', nargs='?') parser = subparsers.add_parser('stamp') parser.add_argument('revision', nargs='?') parser.set_defaults(func=DBCommand.stamp) parser = subparsers.add_parser( 'revision', help="Create a new alembic revision. " "Use --message to set the message string.") parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=DBCommand.revision) parser = subparsers.add_parser( 'version', help="Print the current version information and exit.") parser.set_defaults(func=DBCommand.version) parser = subparsers.add_parser( 'create_schema', help="Create the database schema.") parser.set_defaults(func=DBCommand.create_schema) parser = subparsers.add_parser( 'purge', help="Purge the database.") parser.add_argument('-d', '--age-in-days', help="Number of days since deletion (from today) " "to exclude from the purge. If None, everything " "will be purged.", type=int, default=None, nargs='?') parser.add_argument('-n', '--max-number', help="Max number of objects expected to be deleted. " "Prevents the deletion if exceeded. No limit if " "set to None.", type=int, default=None, nargs='?') parser.add_argument('-t', '--goal', help="UUID or name of the goal to purge.", type=str, default=None, nargs='?') parser.add_argument('-e', '--exclude-orphans', action='store_true', help="Flag to indicate whether or not you want to " "exclude orphans from deletion (default: False).", default=False) parser.add_argument('--dry-run', action='store_true', help="Flag to indicate whether or not you want to " "perform a dry run (no deletion).", default=False) parser.set_defaults(func=DBCommand.purge) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) def register_sub_command_opts(): cfg.CONF.register_cli_opt(command_opt) def main(): register_sub_command_opts() # this is hack to work with previous usage of watcher-dbsync # pls change it to watcher-dbsync upgrade valid_commands = set([ 'upgrade', 'downgrade', 'revision', 'version', 'stamp', 'create_schema', 'purge', ]) if not set(sys.argv).intersection(valid_commands): sys.argv.append('upgrade') service.prepare_service(sys.argv, CONF) CONF.command.func() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/decisionengine.py0000664000175000017500000000313200000000000022411 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for the Decision Engine manager service.""" import os import sys from oslo_log import log from watcher.common import service as watcher_service from watcher import conf from watcher.decision_engine import gmr from watcher.decision_engine import manager from watcher.decision_engine import scheduling from watcher.decision_engine import sync LOG = log.getLogger(__name__) CONF = conf.CONF def main(): watcher_service.prepare_service(sys.argv, CONF) gmr.register_gmr_plugins() LOG.info('Starting Watcher Decision Engine service in PID %s', os.getpid()) syncer = sync.Syncer() syncer.sync() de_service = watcher_service.Service(manager.DecisionEngineManager) bg_scheduler_service = scheduling.DecisionEngineSchedulingService() # Only 1 process launcher = watcher_service.launch(CONF, de_service) launcher.launch_service(bg_scheduler_service) launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/status.py0000664000175000017500000000344500000000000020760 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from watcher._i18n import _ from watcher.common import clients from watcher import conf CONF = conf.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ def _minimum_nova_api_version(self): """Checks the minimum required version of nova_client.api_version""" try: clients.check_min_nova_api_version(CONF.nova_client.api_version) except ValueError as e: return upgradecheck.Result( upgradecheck.Code.FAILURE, str(e)) return upgradecheck.Result(upgradecheck.Code.SUCCESS) _upgrade_checks = ( # Added in Train. (_('Minimum Nova API Version'), _minimum_nova_api_version), # Added in Wallaby. (_("Policy File JSON to YAML Migration"), (common_checks.check_policy_json, {'conf': CONF})), ) def main(): return upgradecheck.main( CONF, project='watcher', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/cmd/sync.py0000664000175000017500000000204300000000000020402 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Script for the sync tool.""" import sys from oslo_log import log from watcher.common import service from watcher import conf from watcher.decision_engine import sync LOG = log.getLogger(__name__) CONF = conf.CONF def main(): LOG.info('Watcher sync started.') service.prepare_service(sys.argv, CONF) syncer = sync.Syncer() syncer.sync() LOG.info('Watcher sync finished.') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/common/0000775000175000017500000000000000000000000017602 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/__init__.py0000664000175000017500000000000000000000000021701 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/cinder_helper.py0000664000175000017500000002255700000000000022772 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from oslo_log import log from cinderclient import exceptions as cinder_exception from cinderclient.v3.volumes import Volume from watcher._i18n import _ from watcher.common import clients from watcher.common import exception from watcher import conf CONF = conf.CONF LOG = log.getLogger(__name__) class CinderHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.cinder = self.osc.cinder() def get_storage_node_list(self): return list(self.cinder.services.list(binary='cinder-volume')) def get_storage_node_by_name(self, name): """Get storage node by name(host@backendname)""" try: storages = [storage for storage in self.get_storage_node_list() if storage.host == name] if len(storages) != 1: raise exception.StorageNodeNotFound(name=name) return storages[0] except Exception as exc: LOG.exception(exc) raise exception.StorageNodeNotFound(name=name) def get_storage_pool_list(self): return self.cinder.pools.list(detailed=True) def get_storage_pool_by_name(self, name): """Get pool by name(host@backend#poolname)""" try: pools = [pool for pool in self.get_storage_pool_list() if pool.name == name] if len(pools) != 1: raise exception.PoolNotFound(name=name) return pools[0] except Exception as exc: LOG.exception(exc) raise exception.PoolNotFound(name=name) def get_volume_list(self): return self.cinder.volumes.list(search_opts={'all_tenants': True}) def get_volume_type_list(self): return self.cinder.volume_types.list() def get_volume_snapshots_list(self): return self.cinder.volume_snapshots.list( search_opts={'all_tenants': True}) def get_volume_type_by_backendname(self, backendname): """Return a list of volume type""" volume_type_list = self.get_volume_type_list() volume_type = [volume_type.name for 
volume_type in volume_type_list if volume_type.extra_specs.get( 'volume_backend_name') == backendname] return volume_type def get_volume(self, volume): if isinstance(volume, Volume): volume = volume.id try: volume = self.cinder.volumes.get(volume) return volume except cinder_exception.NotFound: return self.cinder.volumes.find(name=volume) def backendname_from_poolname(self, poolname): """Get backendname from poolname""" # pooolname formatted as host@backend#pool since ocata # as of ocata, may as only host backend = poolname.split('#')[0] backendname = "" try: backendname = backend.split('@')[1] except IndexError: pass return backendname def _has_snapshot(self, volume): """Judge volume has a snapshot""" volume = self.get_volume(volume) if volume.snapshot_id: return True return False def get_deleting_volume(self, volume): volume = self.get_volume(volume) all_volume = self.get_volume_list() for _volume in all_volume: if getattr(_volume, 'os-vol-mig-status-attr:name_id') == volume.id: return _volume return False def _can_get_volume(self, volume_id): """Check to get volume with volume_id""" try: volume = self.get_volume(volume_id) if not volume: raise Exception except cinder_exception.NotFound: return False else: return True def check_volume_deleted(self, volume, retry=120, retry_interval=10): """Check volume has been deleted""" volume = self.get_volume(volume) while self._can_get_volume(volume.id) and retry: volume = self.get_volume(volume.id) time.sleep(retry_interval) retry -= 1 LOG.debug("retry count: %s", retry) LOG.debug("Waiting to complete deletion of volume %s", volume.id) if self._can_get_volume(volume.id): LOG.error("Volume deletion error: %s", volume.id) return False LOG.debug("Volume %s was deleted successfully.", volume.id) return True def check_migrated(self, volume, retry_interval=10): volume = self.get_volume(volume) final_status = ('success', 'error') while getattr(volume, 'migration_status') not in final_status: volume = self.get_volume(volume.id) LOG.debug('Waiting the migration of %s', volume) time.sleep(retry_interval) if getattr(volume, 'migration_status') == 'error': host_name = getattr(volume, 'os-vol-host-attr:host') error_msg = (("Volume migration error : " "volume %(volume)s is now on host '%(host)s'.") % {'volume': volume.id, 'host': host_name}) LOG.error(error_msg) return False host_name = getattr(volume, 'os-vol-host-attr:host') if getattr(volume, 'migration_status') == 'success': # check original volume deleted deleting_volume = self.get_deleting_volume(volume) if deleting_volume: delete_id = getattr(deleting_volume, 'id') if not self.check_volume_deleted(delete_id): return False else: host_name = getattr(volume, 'os-vol-host-attr:host') error_msg = (("Volume migration error : " "volume %(volume)s is now on host '%(host)s'.") % {'volume': volume.id, 'host': host_name}) LOG.error(error_msg) return False LOG.debug( "Volume migration succeeded : volume %s is now on host '%s'.", ( volume.id, host_name)) return True def migrate(self, volume, dest_node): """Migrate volume to dest_node""" volume = self.get_volume(volume) dest_backend = self.backendname_from_poolname(dest_node) dest_type = self.get_volume_type_by_backendname(dest_backend) if volume.volume_type not in dest_type: raise exception.Invalid( message=(_("Volume type must be same for migrating"))) source_node = getattr(volume, 'os-vol-host-attr:host') LOG.debug("Volume %s found on host '%s'.", (volume.id, source_node)) self.cinder.volumes.migrate_volume( volume, dest_node, False, True) return 
    def retype(self, volume, dest_type):
        """Retype volume to dest_type with on-demand option"""
        volume = self.get_volume(volume)
        if volume.volume_type == dest_type:
            raise exception.Invalid(
                message=(_("Volume type must be different for retyping")))
        source_node = getattr(volume, 'os-vol-host-attr:host')
        LOG.debug(
            "Volume %s found on host '%s'.", volume.id, source_node)
        self.cinder.volumes.retype(
            volume, dest_type, "on-demand")
        return self.check_migrated(volume)

    def create_volume(self, cinder, volume, dest_type,
                      retry=120, retry_interval=10):
        """Create a copy of the volume with dest_type using the given client"""
        volume = self.get_volume(volume)
        LOG.debug("start creating new volume")
        new_volume = cinder.volumes.create(
            getattr(volume, 'size'),
            name=getattr(volume, 'name'),
            volume_type=dest_type,
            availability_zone=getattr(volume, 'availability_zone'))
        while getattr(new_volume, 'status') != 'available' and retry:
            new_volume = cinder.volumes.get(new_volume.id)
            LOG.debug('Waiting for the creation of volume %s', new_volume)
            time.sleep(retry_interval)
            retry -= 1
            LOG.debug("retry count: %s", retry)
        if getattr(new_volume, 'status') != 'available':
            error_msg = (_("Failed to create volume '%(volume)s'. ") %
                         {'volume': new_volume.id})
            raise Exception(error_msg)
        LOG.debug("Volume %s was created successfully.", new_volume)
        return new_volume

    def delete_volume(self, volume):
        """Delete volume"""
        volume = self.get_volume(volume)
        self.cinder.volumes.delete(volume)
        result = self.check_volume_deleted(volume)
        if not result:
            error_msg = (_("Failed to delete volume '%(volume)s'. ") %
                         {'volume': volume.id})
            raise Exception(error_msg)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/common/clients.py0000664000175000017500000002676500000000000021625 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from cinderclient import client as ciclient
from glanceclient import client as glclient
from gnocchiclient import client as gnclient
from ironicclient import client as irclient
from keystoneauth1 import adapter as ka_adapter
from keystoneauth1 import loading as ka_loading
from keystoneclient import client as keyclient
from monascaclient import client as monclient
from neutronclient.neutron import client as netclient
from novaclient import api_versions as nova_api_versions
from novaclient import client as nvclient

from watcher.common import exception
from watcher.common import utils

try:
    from maas import client as maas_client
except ImportError:
    maas_client = None

CONF = cfg.CONF

_CLIENTS_AUTH_GROUP = 'watcher_clients_auth'

# NOTE(mriedem): This is the minimum required version of the nova API for
# watcher features to work. If new features are added which require new
# versions, they should perform version discovery and be backward compatible
# for at least one release before raising the minimum required version.
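# A quick illustration of the version gate implemented below (the values are
# examples only, not part of the original module): with MIN_NOVA_API_VERSION
# set to '2.56',
#
#     check_min_nova_api_version('2.53')   # raises ValueError
#     check_min_nova_api_version('2.60')   # passes, since 2.60 >= 2.56
#
# Microversions compare numerically, so '2.60' is newer than '2.56'.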
MIN_NOVA_API_VERSION = '2.56' def check_min_nova_api_version(config_version): """Validates the minimum required nova API version. :param config_version: The configured [nova_client]/api_version value :raises: ValueError if the configured version is less than the required minimum """ min_required = nova_api_versions.APIVersion(MIN_NOVA_API_VERSION) if nova_api_versions.APIVersion(config_version) < min_required: raise ValueError('Invalid nova_client.api_version %s. %s or ' 'greater is required.' % (config_version, MIN_NOVA_API_VERSION)) class OpenStackClients(object): """Convenience class to create and cache client instances.""" def __init__(self): self.reset_clients() def reset_clients(self): self._session = None self._keystone = None self._nova = None self._glance = None self._gnocchi = None self._cinder = None self._monasca = None self._neutron = None self._ironic = None self._maas = None self._placement = None def _get_keystone_session(self): auth = ka_loading.load_auth_from_conf_options(CONF, _CLIENTS_AUTH_GROUP) sess = ka_loading.load_session_from_conf_options(CONF, _CLIENTS_AUTH_GROUP, auth=auth) return sess @property def auth_url(self): return self.keystone().auth_url @property def session(self): if not self._session: self._session = self._get_keystone_session() return self._session def _get_client_option(self, client, option): return getattr(getattr(CONF, '%s_client' % client), option) @exception.wrap_keystone_exception def keystone(self): if self._keystone: return self._keystone keystone_interface = self._get_client_option('keystone', 'interface') keystone_region_name = self._get_client_option('keystone', 'region_name') self._keystone = keyclient.Client( interface=keystone_interface, region_name=keystone_region_name, session=self.session) return self._keystone @exception.wrap_keystone_exception def nova(self): if self._nova: return self._nova novaclient_version = self._get_client_option('nova', 'api_version') check_min_nova_api_version(novaclient_version) nova_endpoint_type = self._get_client_option('nova', 'endpoint_type') nova_region_name = self._get_client_option('nova', 'region_name') self._nova = nvclient.Client(novaclient_version, endpoint_type=nova_endpoint_type, region_name=nova_region_name, session=self.session) return self._nova @exception.wrap_keystone_exception def glance(self): if self._glance: return self._glance glanceclient_version = self._get_client_option('glance', 'api_version') glance_endpoint_type = self._get_client_option('glance', 'endpoint_type') glance_region_name = self._get_client_option('glance', 'region_name') self._glance = glclient.Client(glanceclient_version, interface=glance_endpoint_type, region_name=glance_region_name, session=self.session) return self._glance @exception.wrap_keystone_exception def gnocchi(self): if self._gnocchi: return self._gnocchi gnocchiclient_version = self._get_client_option('gnocchi', 'api_version') gnocchiclient_interface = self._get_client_option('gnocchi', 'endpoint_type') gnocchiclient_region_name = self._get_client_option('gnocchi', 'region_name') adapter_options = { "interface": gnocchiclient_interface, "region_name": gnocchiclient_region_name } self._gnocchi = gnclient.Client(gnocchiclient_version, adapter_options=adapter_options, session=self.session) return self._gnocchi @exception.wrap_keystone_exception def cinder(self): if self._cinder: return self._cinder cinderclient_version = self._get_client_option('cinder', 'api_version') cinder_endpoint_type = self._get_client_option('cinder', 'endpoint_type') 
cinder_region_name = self._get_client_option('cinder', 'region_name') self._cinder = ciclient.Client(cinderclient_version, endpoint_type=cinder_endpoint_type, region_name=cinder_region_name, session=self.session) return self._cinder @exception.wrap_keystone_exception def monasca(self): if self._monasca: return self._monasca monascaclient_version = self._get_client_option( 'monasca', 'api_version') monascaclient_interface = self._get_client_option( 'monasca', 'interface') monascaclient_region = self._get_client_option( 'monasca', 'region_name') token = self.session.get_token() watcher_clients_auth_config = CONF.get(_CLIENTS_AUTH_GROUP) service_type = 'monitoring' monasca_kwargs = { 'auth_url': watcher_clients_auth_config.auth_url, 'cert_file': watcher_clients_auth_config.certfile, 'insecure': watcher_clients_auth_config.insecure, 'key_file': watcher_clients_auth_config.keyfile, 'keystone_timeout': watcher_clients_auth_config.timeout, 'os_cacert': watcher_clients_auth_config.cafile, 'service_type': service_type, 'token': token, 'username': watcher_clients_auth_config.username, 'password': watcher_clients_auth_config.password, } endpoint = self.session.get_endpoint(service_type=service_type, interface=monascaclient_interface, region_name=monascaclient_region) self._monasca = monclient.Client( monascaclient_version, endpoint, **monasca_kwargs) return self._monasca @exception.wrap_keystone_exception def neutron(self): if self._neutron: return self._neutron neutronclient_version = self._get_client_option('neutron', 'api_version') neutron_endpoint_type = self._get_client_option('neutron', 'endpoint_type') neutron_region_name = self._get_client_option('neutron', 'region_name') self._neutron = netclient.Client(neutronclient_version, endpoint_type=neutron_endpoint_type, region_name=neutron_region_name, session=self.session) self._neutron.format = 'json' return self._neutron @exception.wrap_keystone_exception def ironic(self): if self._ironic: return self._ironic ironicclient_version = self._get_client_option('ironic', 'api_version') endpoint_type = self._get_client_option('ironic', 'endpoint_type') ironic_region_name = self._get_client_option('ironic', 'region_name') self._ironic = irclient.get_client(ironicclient_version, interface=endpoint_type, region_name=ironic_region_name, session=self.session) return self._ironic def maas(self): if self._maas: return self._maas if not maas_client: raise exception.UnsupportedError( "MAAS client unavailable. Please install python-libmaas.") url = self._get_client_option('maas', 'url') api_key = self._get_client_option('maas', 'api_key') timeout = self._get_client_option('maas', 'timeout') self._maas = utils.async_compat_call( maas_client.connect, url, apikey=api_key, timeout=timeout) return self._maas @exception.wrap_keystone_exception def placement(self): if self._placement: return self._placement placement_version = self._get_client_option('placement', 'api_version') placement_interface = self._get_client_option('placement', 'interface') placement_region_name = self._get_client_option('placement', 'region_name') # Set accept header on every request to ensure we notify placement # service of our response body media type preferences. 
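        # NOTE: default_microversion on the keystoneauth adapter below makes
        # every placement request carry the configured microversion, so
        # callers do not need to pass it per call.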
headers = {'accept': 'application/json'} self._placement = ka_adapter.Adapter( session=self.session, service_type='placement', default_microversion=placement_version, interface=placement_interface, region_name=placement_region_name, additional_headers=headers) return self._placement ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/config.py0000664000175000017500000000267100000000000021427 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from watcher.common import rpc from watcher import version def parse_args(argv, default_config_files=None, default_config_dirs=None): default_config_files = (default_config_files or cfg.find_config_files(project='watcher')) default_config_dirs = (default_config_dirs or cfg.find_config_dirs(project='watcher')) rpc.set_defaults(control_exchange='watcher') cfg.CONF(argv[1:], project='watcher', version=version.version_info.release_string(), default_config_dirs=default_config_dirs, default_config_files=default_config_files) rpc.init(cfg.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/context.py0000664000175000017500000001104300000000000021637 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_context import context from oslo_db.sqlalchemy import enginefacade from oslo_log import log from oslo_utils import timeutils LOG = log.getLogger(__name__) @enginefacade.transaction_context_provider class RequestContext(context.RequestContext): """Extends security contexts from the OpenStack common library.""" def __init__(self, user_id=None, project_id=None, is_admin=None, roles=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, user_name=None, project_name=None, domain_name=None, domain_id=None, auth_token_info=None, **kwargs): """Stores several additional request parameters: :param domain_id: The ID of the domain. :param domain_name: The name of the domain. :param is_public_api: Specifies whether the request should be processed without authentication. 
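        :param auth_token_info: the Keystone token information tied to the
            current request, if any.
        :param roles: the list of role names held by the requesting user.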
""" user = kwargs.pop('user', None) tenant = kwargs.pop('tenant', None) super(RequestContext, self).__init__( auth_token=auth_token, user_id=user_id or user, project_id=project_id or tenant, domain_id=kwargs.pop('domain', None) or domain_name or domain_id, user_domain_id=kwargs.pop('user_domain', None), project_domain_id=kwargs.pop('project_domain', None), is_admin=is_admin, read_only=kwargs.pop('read_only', False), show_deleted=kwargs.pop('show_deleted', False), request_id=request_id, resource_uuid=kwargs.pop('resource_uuid', None), is_admin_project=kwargs.pop('is_admin_project', True), overwrite=overwrite, roles=roles, global_request_id=kwargs.pop('global_request_id', None), system_scope=kwargs.pop('system_scope', None)) self.remote_address = kwargs.pop('remote_address', None) self.read_deleted = kwargs.pop('read_deleted', None) self.service_catalog = kwargs.pop('service_catalog', None) self.quota_class = kwargs.pop('quota_class', None) # FIXME(dims): user_id and project_id duplicate information that is # already present in the oslo_context's RequestContext. We need to # get rid of them. self.domain_name = domain_name self.domain_id = domain_id self.auth_token_info = auth_token_info self.user_id = user_id or user self.project_id = project_id if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, str): timestamp = timeutils.parse_isotime(timestamp) self.timestamp = timestamp self.user_name = user_name self.project_name = project_name self.is_admin = is_admin # if self.is_admin is None: # self.is_admin = policy.check_is_admin(self) def to_dict(self): values = super(RequestContext, self).to_dict() # FIXME(dims): defensive hasattr() checks need to be # removed once we figure out why we are seeing stack # traces values.update({ 'user_id': getattr(self, 'user_id', None), 'user_name': getattr(self, 'user_name', None), 'project_id': getattr(self, 'project_id', None), 'project_name': getattr(self, 'project_name', None), 'domain_id': getattr(self, 'domain_id', None), 'domain_name': getattr(self, 'domain_name', None), 'auth_token_info': getattr(self, 'auth_token_info', None), 'is_admin': getattr(self, 'is_admin', None), 'timestamp': self.timestamp.isoformat() if hasattr( self, 'timestamp') else None, 'request_id': getattr(self, 'request_id', None), }) return values @classmethod def from_dict(cls, values): return cls(**values) def __str__(self): return "" % self.to_dict() def make_context(*args, **kwargs): return RequestContext(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/exception.py0000664000175000017500000003412000000000000022152 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Watcher base exception handling. Includes decorator for re-raising Watcher-type exceptions. SHOULD include dedicated exception logging. 
""" import functools import sys from http import HTTPStatus from keystoneclient import exceptions as keystone_exceptions from oslo_config import cfg from oslo_log import log from watcher._i18n import _ LOG = log.getLogger(__name__) CONF = cfg.CONF def wrap_keystone_exception(func): """Wrap keystone exceptions and throw Watcher specific exceptions.""" @functools.wraps(func) def wrapped(*args, **kw): try: return func(*args, **kw) except keystone_exceptions.AuthorizationFailure: raise AuthorizationFailure( client=func.__name__, reason=sys.exc_info()[1]) except keystone_exceptions.ClientException: raise AuthorizationFailure( client=func.__name__, reason=(_('Unexpected keystone client error occurred: %s') % sys.exc_info()[1])) return wrapped class WatcherException(Exception): """Base Watcher Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. That msg_fmt will get printf'd with the keyword arguments provided to the constructor. """ msg_fmt = _("An unknown exception occurred") code = HTTPStatus.INTERNAL_SERVER_ERROR headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in msg_fmt # log the issue and the kwargs LOG.exception('Exception in string format operation') for name, value in kwargs.items(): LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) if CONF.fatal_exception_format_errors: raise else: # at least get the core msg_fmt out if something happened message = self.msg_fmt super(WatcherException, self).__init__(message) def __str__(self): """Encode to utf-8 then wsme api can consume it as well""" return self.args[0] def __unicode__(self): return str(self.args[0]) def format_message(self): if self.__class__.__name__.endswith('_Remote'): return self.args[0] else: return str(self) class UnsupportedError(WatcherException): msg_fmt = _("Not supported") class NotAuthorized(WatcherException): msg_fmt = _("Not authorized") code = HTTPStatus.FORBIDDEN class NotAcceptable(WatcherException): msg_fmt = _("Request not acceptable.") code = HTTPStatus.NOT_ACCEPTABLE class PolicyNotAuthorized(NotAuthorized): msg_fmt = _("Policy doesn't allow %(action)s to be performed.") class OperationNotPermitted(NotAuthorized): msg_fmt = _("Operation not permitted") class Invalid(WatcherException, ValueError): msg_fmt = _("Unacceptable parameters") code = HTTPStatus.BAD_REQUEST class ObjectNotFound(WatcherException): msg_fmt = _("The %(name)s %(id)s could not be found") class Conflict(WatcherException): msg_fmt = _('Conflict') code = HTTPStatus.CONFLICT class ResourceNotFound(ObjectNotFound): msg_fmt = _("The %(name)s resource %(id)s could not be found") code = HTTPStatus.NOT_FOUND class InvalidParameter(Invalid): msg_fmt = _("%(parameter)s has to be of type %(parameter_type)s") class MissingParameter(Invalid): msg_fmt = _("%(parameter)s is required but missing. 
Check watcher.conf") class InvalidIdentity(Invalid): msg_fmt = _("Expected a uuid or int but received %(identity)s") class InvalidOperator(Invalid): msg_fmt = _("Filter operator is not valid: %(operator)s not " "in %(valid_operators)s") class InvalidGoal(Invalid): msg_fmt = _("Goal %(goal)s is invalid") class InvalidStrategy(Invalid): msg_fmt = _("Strategy %(strategy)s is invalid") class InvalidAudit(Invalid): msg_fmt = _("Audit %(audit)s is invalid") class EagerlyLoadedAuditRequired(InvalidAudit): msg_fmt = _("Audit %(audit)s was not eagerly loaded") class InvalidActionPlan(Invalid): msg_fmt = _("Action plan %(action_plan)s is invalid") class EagerlyLoadedActionPlanRequired(InvalidActionPlan): msg_fmt = _("Action plan %(action_plan)s was not eagerly loaded") class EagerlyLoadedActionRequired(InvalidActionPlan): msg_fmt = _("Action %(action)s was not eagerly loaded") class InvalidUUID(Invalid): msg_fmt = _("Expected a uuid but received %(uuid)s") class InvalidName(Invalid): msg_fmt = _("Expected a logical name but received %(name)s") class InvalidUuidOrName(Invalid): msg_fmt = _("Expected a logical name or uuid but received %(name)s") class InvalidIntervalOrCron(Invalid): msg_fmt = _("Expected an interval or cron syntax but received %(name)s") class DataModelTypeNotFound(ResourceNotFound): msg_fmt = _("The %(data_model_type)s data model could not be found") class GoalNotFound(ResourceNotFound): msg_fmt = _("Goal %(goal)s could not be found") class GoalAlreadyExists(Conflict): msg_fmt = _("A goal with UUID %(uuid)s already exists") class StrategyNotFound(ResourceNotFound): msg_fmt = _("Strategy %(strategy)s could not be found") class StrategyAlreadyExists(Conflict): msg_fmt = _("A strategy with UUID %(uuid)s already exists") class AuditTemplateNotFound(ResourceNotFound): msg_fmt = _("AuditTemplate %(audit_template)s could not be found") class AuditTemplateAlreadyExists(Conflict): msg_fmt = _("An audit_template with UUID or name %(audit_template)s " "already exists") class AuditTypeNotFound(Invalid): msg_fmt = _("Audit type %(audit_type)s could not be found") class AuditTypeNotAllowed(Invalid): msg_fmt = _("Audit type %(audit_type)s is disallowed.") class AuditStateNotAllowed(Invalid): msg_fmt = _("Audit state %(state)s is disallowed.") class AuditParameterNotAllowed(Invalid): msg_fmt = _("Audit parameter %(parameter)s are not allowed") class AuditNotFound(ResourceNotFound): msg_fmt = _("Audit %(audit)s could not be found") class AuditAlreadyExists(Conflict): msg_fmt = _("An audit with UUID or name %(audit)s already exists") class AuditIntervalNotSpecified(Invalid): msg_fmt = _("Interval of audit must be specified for %(audit_type)s.") class AuditIntervalNotAllowed(Invalid): msg_fmt = _("Interval of audit must not be set for %(audit_type)s.") class AuditStartEndTimeNotAllowed(Invalid): msg_fmt = _("Start or End time of audit must not be set for " "%(audit_type)s.") class AuditReferenced(Invalid): msg_fmt = _("Audit %(audit)s is referenced by one or multiple action " "plans") class ActionPlanNotFound(ResourceNotFound): msg_fmt = _("ActionPlan %(action_plan)s could not be found") class ActionPlanAlreadyExists(Conflict): msg_fmt = _("An action plan with UUID %(uuid)s already exists") class ActionPlanReferenced(Invalid): msg_fmt = _("Action Plan %(action_plan)s is referenced by one or " "multiple actions") class ActionPlanCancelled(WatcherException): msg_fmt = _("Action Plan with UUID %(uuid)s is cancelled by user") class ActionPlanIsOngoing(Conflict): msg_fmt = _("Action Plan %(action_plan)s is 
currently running.") class ActionNotFound(ResourceNotFound): msg_fmt = _("Action %(action)s could not be found") class ActionAlreadyExists(Conflict): msg_fmt = _("An action with UUID %(uuid)s already exists") class ActionReferenced(Invalid): msg_fmt = _("Action plan %(action_plan)s is referenced by one or " "multiple goals") class ActionFilterCombinationProhibited(Invalid): msg_fmt = _("Filtering actions on both audit and action-plan is " "prohibited") class UnsupportedActionType(UnsupportedError): msg_fmt = _("Provided %(action_type)s is not supported yet") class EfficacyIndicatorNotFound(ResourceNotFound): msg_fmt = _("Efficacy indicator %(efficacy_indicator)s could not be found") class EfficacyIndicatorAlreadyExists(Conflict): msg_fmt = _("An action with UUID %(uuid)s already exists") class ScoringEngineAlreadyExists(Conflict): msg_fmt = _("A scoring engine with UUID %(uuid)s already exists") class ScoringEngineNotFound(ResourceNotFound): msg_fmt = _("ScoringEngine %(scoring_engine)s could not be found") class HTTPNotFound(ResourceNotFound): pass class PatchError(Invalid): msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s") class DeleteError(Invalid): msg_fmt = _("Couldn't delete when state is '%(state)s'.") class StartError(Invalid): msg_fmt = _("Couldn't start when state is '%(state)s'.") # decision engine class WorkflowExecutionException(WatcherException): msg_fmt = _('Workflow execution error: %(error)s') class IllegalArgumentException(WatcherException): msg_fmt = _('Illegal argument') class AuthorizationFailure(WatcherException): msg_fmt = _('%(client)s connection failed. Reason: %(reason)s') class ClusterStateStale(WatcherException): msg_fmt = _("The cluster state is stale") class ClusterDataModelCollectionError(WatcherException): msg_fmt = _("The cluster data model '%(cdm)s' could not be built") class ClusterStateNotDefined(WatcherException): msg_fmt = _("The cluster state is not defined") class NoAvailableStrategyForGoal(WatcherException): msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.") class InvalidIndicatorValue(WatcherException): msg_fmt = _("The indicator '%(name)s' with value '%(value)s' " "and spec type '%(spec_type)s' is invalid.") class GlobalEfficacyComputationError(WatcherException): msg_fmt = _("Could not compute the global efficacy for the '%(goal)s' " "goal using the '%(strategy)s' strategy.") class UnsupportedDataSource(UnsupportedError): msg_fmt = _("Datasource %(datasource)s is not supported " "by strategy %(strategy)s") class DataSourceNotAvailable(WatcherException): msg_fmt = _("Datasource %(datasource)s is not available.") class MetricNotAvailable(WatcherException): """Indicate that a metric is not configured or does not exists""" msg_fmt = _('Metric: %(metric)s not available') class NoDatasourceAvailable(WatcherException): """No datasources have been configured""" msg_fmt = _('No datasources available') class NoSuchMetricForHost(WatcherException): msg_fmt = _("No %(metric)s metric for %(host)s found.") class ServiceAlreadyExists(Conflict): msg_fmt = _("A service with name %(name)s is already working on %(host)s.") class ServiceNotFound(ResourceNotFound): msg_fmt = _("The service %(service)s cannot be found.") class WildcardCharacterIsUsed(WatcherException): msg_fmt = _("You shouldn't use any other IDs of %(resource)s if you use " "wildcard character.") class CronFormatIsInvalid(WatcherException): msg_fmt = _("Provided cron is invalid: %(message)s") class ActionDescriptionAlreadyExists(Conflict): msg_fmt = _("An action 
                "description with type %(action_type)s already exists.")


class ActionDescriptionNotFound(ResourceNotFound):
    msg_fmt = _("The action description %(action_id)s cannot be found.")


class ActionExecutionFailure(WatcherException):
    msg_fmt = _("The action %(action_id)s execution failed.")


# Model

class ComputeResourceNotFound(WatcherException):
    msg_fmt = _("The compute resource '%(name)s' could not be found")


class InstanceNotFound(ComputeResourceNotFound):
    msg_fmt = _("The instance '%(name)s' could not be found")


class InstanceNotMapped(ComputeResourceNotFound):
    msg_fmt = _("The mapped compute node for instance '%(uuid)s' "
                "could not be found.")


class ComputeNodeNotFound(ComputeResourceNotFound):
    msg_fmt = _("The compute node %(name)s could not be found")


class StorageResourceNotFound(WatcherException):
    msg_fmt = _("The storage resource '%(name)s' could not be found")


class StorageNodeNotFound(StorageResourceNotFound):
    msg_fmt = _("The storage node %(name)s could not be found")


class PoolNotFound(StorageResourceNotFound):
    msg_fmt = _("The pool %(name)s could not be found")


class VolumeNotFound(StorageResourceNotFound):
    msg_fmt = _("The volume '%(name)s' could not be found")


class BaremetalResourceNotFound(WatcherException):
    msg_fmt = _("The baremetal resource '%(name)s' could not be found")


class IronicNodeNotFound(BaremetalResourceNotFound):
    msg_fmt = _("The ironic node %(uuid)s could not be found")


class LoadingError(WatcherException):
    msg_fmt = _("Error loading plugin '%(name)s'")


class ReservedWord(WatcherException):
    msg_fmt = _("The identifier '%(name)s' is a reserved word")


class NotSoftDeletedStateError(WatcherException):
    msg_fmt = _("The %(name)s resource %(id)s is not soft deleted")


class NegativeLimitError(WatcherException):
    msg_fmt = _("Limit should be positive")


class NotificationPayloadError(WatcherException):
    msg_fmt = _("Payload not populated when trying to send notification "
                "\"%(class_name)s\"")


class InvalidPoolAttributeValue(Invalid):
    msg_fmt = _("The %(name)s pool %(attribute)s is not an integer")
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/common/ironic_helper.py0000664000175000017500000000311100000000000022772 0ustar00zuulzuul00000000000000
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 ZTE Corporation
#
# Authors: Yumeng Bao
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
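# Usage sketch (illustrative only, not part of this module; assumes watcher's
# keystone auth options are configured):
#
#     helper = IronicHelper()
#     for node in helper.get_ironic_node_list():
#         print(node.uuid)
#
#     node = helper.get_ironic_node_by_uuid('some-node-uuid')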
# from oslo_log import log from watcher.common import clients from watcher.common import exception from watcher.common import utils LOG = log.getLogger(__name__) class IronicHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.ironic = self.osc.ironic() def get_ironic_node_list(self): return self.ironic.node.list() def get_ironic_node_by_uuid(self, node_uuid): """Get ironic node by node UUID""" try: node = self.ironic.node.get(utils.Struct(uuid=node_uuid)) if not node: raise exception.IronicNodeNotFound(uuid=node_uuid) except Exception as exc: LOG.exception(exc) raise exception.IronicNodeNotFound(uuid=node_uuid) # We need to pass an object with an 'uuid' attribute to make it work return node ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/keystone_helper.py0000664000175000017500000001060500000000000023356 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from keystoneauth1.exceptions import http as ks_exceptions from keystoneauth1 import loading from keystoneauth1 import session from watcher._i18n import _ from watcher.common import clients from watcher.common import exception from watcher import conf CONF = conf.CONF LOG = log.getLogger(__name__) class KeystoneHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.keystone = self.osc.keystone() def get_role(self, name_or_id): try: role = self.keystone.roles.get(name_or_id) return role except ks_exceptions.NotFound: roles = self.keystone.roles.list(name=name_or_id) if len(roles) == 0: raise exception.Invalid( message=(_("Role not Found: %s") % name_or_id)) if len(roles) > 1: raise exception.Invalid( message=(_("Role name seems ambiguous: %s") % name_or_id)) return roles[0] def get_user(self, name_or_id): try: user = self.keystone.users.get(name_or_id) return user except ks_exceptions.NotFound: users = self.keystone.users.list(name=name_or_id) if len(users) == 0: raise exception.Invalid( message=(_("User not Found: %s") % name_or_id)) if len(users) > 1: raise exception.Invalid( message=(_("User name seems ambiguous: %s") % name_or_id)) return users[0] def get_project(self, name_or_id): try: project = self.keystone.projects.get(name_or_id) return project except ks_exceptions.NotFound: projects = self.keystone.projects.list(name=name_or_id) if len(projects) == 0: raise exception.Invalid( message=(_("Project not Found: %s") % name_or_id)) if len(projects) > 1: raise exception.Invalid( message=(_("Project name seems ambiguous: %s") % name_or_id)) return projects[0] def get_domain(self, name_or_id): try: domain = self.keystone.domains.get(name_or_id) return domain except ks_exceptions.NotFound: domains = self.keystone.domains.list(name=name_or_id) if len(domains) == 0: raise exception.Invalid( message=(_("Domain not 
Found: %s") % name_or_id)) if len(domains) > 1: raise exception.Invalid( message=(_("Domain name seems ambiguous: %s") % name_or_id)) return domains[0] def create_session(self, user_id, password): user = self.get_user(user_id) loader = loading.get_plugin_loader('password') auth = loader.load_from_options( auth_url=CONF.watcher_clients_auth.auth_url, password=password, user_id=user_id, project_id=user.default_project_id) return session.Session(auth=auth) def create_user(self, user): project = self.get_project(user['project']) domain = self.get_domain(user['domain']) _user = self.keystone.users.create( user['name'], password=user['password'], domain=domain, project=project, ) for role in user['roles']: role = self.get_role(role) self.keystone.roles.grant( role.id, user=_user.id, project=project.id) return _user def delete_user(self, user): try: user = self.get_user(user) self.keystone.users.delete(user) except exception.Invalid: pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/common/loader/0000775000175000017500000000000000000000000021050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/loader/__init__.py0000664000175000017500000000000000000000000023147 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/loader/base.py0000664000175000017500000000151200000000000022333 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc class BaseLoader(object, metaclass=abc.ABCMeta): @abc.abstractmethod def list_available(self): raise NotImplementedError() @abc.abstractmethod def load(self, name): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/loader/default.py0000664000175000017500000000600400000000000023046 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_log import log from stevedore import driver as drivermanager from stevedore import extension as extensionmanager from watcher.common import exception from watcher.common.loader import base from watcher.common import utils LOG = log.getLogger(__name__) class DefaultLoader(base.BaseLoader): def __init__(self, namespace, conf=cfg.CONF): """Entry point loader for Watcher using Stevedore :param namespace: namespace of the entry point(s) to load or list :type namespace: str :param conf: ConfigOpts instance, defaults to cfg.CONF """ super(DefaultLoader, self).__init__() self.namespace = namespace self.conf = conf def load(self, name, **kwargs): try: LOG.debug("Loading in namespace %s => %s ", self.namespace, name) driver_manager = drivermanager.DriverManager( namespace=self.namespace, name=name, invoke_on_load=False, ) driver_cls = driver_manager.driver config = self._load_plugin_config(name, driver_cls) driver = driver_cls(config, **kwargs) except Exception as exc: LOG.exception(exc) raise exception.LoadingError(name=name) return driver def _reload_config(self): self.conf(default_config_files=self.conf.default_config_files) def get_entry_name(self, name): return ".".join([self.namespace, name]) def _load_plugin_config(self, name, driver_cls): """Load the config of the plugin""" config = utils.Struct() config_opts = driver_cls.get_config_opts() if not config_opts: return config group_name = self.get_entry_name(name) self.conf.register_opts(config_opts, group=group_name) # Finalise the opt import by re-checking the configuration # against the provided config files self._reload_config() config_group = self.conf.get(group_name) if not config_group: raise exception.LoadingError(name=name) config.update({ name: value for name, value in config_group.items() }) return config def list_available(self): extension_manager = extensionmanager.ExtensionManager( namespace=self.namespace) return {ext.name: ext.plugin for ext in extension_manager.extensions} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/loader/loadable.py0000664000175000017500000000426000000000000023167 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from watcher.common import service class Loadable(object, metaclass=abc.ABCMeta): """Generic interface for dynamically loading a driver/entry point. This defines the contract in order to let the loader manager inject the configuration parameters during the loading. 
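    Concrete plugins receive their parsed configuration through the 'config'
    constructor argument and must implement get_config_opts() to declare
    which options the loader should register for them.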
""" def __init__(self, config): super(Loadable, self).__init__() self.config = config @classmethod @abc.abstractmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ raise NotImplementedError LoadableSingletonMeta = type( "LoadableSingletonMeta", (abc.ABCMeta, service.Singleton), {}) class LoadableSingleton(object, metaclass=LoadableSingletonMeta): """Generic interface for dynamically loading a driver as a singleton. This defines the contract in order to let the loader manager inject the configuration parameters during the loading. Classes inheriting from this class will be singletons. """ def __init__(self, config): super(LoadableSingleton, self).__init__() self.config = config @classmethod @abc.abstractmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ raise NotImplementedError ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/common/metal_helper/0000775000175000017500000000000000000000000022243 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/metal_helper/__init__.py0000664000175000017500000000000000000000000024342 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/metal_helper/base.py0000664000175000017500000000442600000000000023535 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from watcher.common import exception from watcher.common.metal_helper import constants as metal_constants class BaseMetalNode(abc.ABC): hv_up_when_powered_off = False def __init__(self, nova_node=None): self._nova_node = nova_node def get_hypervisor_node(self): if not self._nova_node: raise exception.Invalid(message="No associated hypervisor.") return self._nova_node def get_hypervisor_hostname(self): return self.get_hypervisor_node().hypervisor_hostname @abc.abstractmethod def get_power_state(self): # TODO(lpetrut): document the following methods pass @abc.abstractmethod def get_id(self): """Return the node id provided by the bare metal service.""" pass @abc.abstractmethod def power_on(self): pass @abc.abstractmethod def power_off(self): pass def set_power_state(self, state): state = metal_constants.PowerState(state) if state == metal_constants.PowerState.ON: self.power_on() elif state == metal_constants.PowerState.OFF: self.power_off() else: raise exception.UnsupportedActionType( "Cannot set power state: %s" % state) class BaseMetalHelper(abc.ABC): def __init__(self, osc): self._osc = osc @property def nova_client(self): if not getattr(self, "_nova_client", None): self._nova_client = self._osc.nova() return self._nova_client @abc.abstractmethod def list_compute_nodes(self): pass @abc.abstractmethod def get_node(self, node_id): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/metal_helper/constants.py0000664000175000017500000000136500000000000024636 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum class PowerState(str, enum.Enum): ON = "on" OFF = "off" UNKNOWN = "unknown" ERROR = "error" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/metal_helper/factory.py0000664000175000017500000000204400000000000024264 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from watcher.common import clients from watcher.common.metal_helper import ironic from watcher.common.metal_helper import maas CONF = cfg.CONF def get_helper(osc=None): # TODO(lpetrut): consider caching this client. 
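    # The MAAS helper is only selected when [maas_client]/url is set in the
    # configuration; in all other cases the ironic helper is returned below.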
if not osc: osc = clients.OpenStackClients() if CONF.maas_client.url: return maas.MaasHelper(osc) else: return ironic.IronicHelper(osc) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/metal_helper/ironic.py0000664000175000017500000000637200000000000024110 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from watcher.common.metal_helper import base from watcher.common.metal_helper import constants as metal_constants LOG = log.getLogger(__name__) POWER_STATES_MAP = { 'power on': metal_constants.PowerState.ON, 'power off': metal_constants.PowerState.OFF, # For now, we only use ON/OFF states 'rebooting': metal_constants.PowerState.ON, 'soft power off': metal_constants.PowerState.OFF, 'soft reboot': metal_constants.PowerState.ON, } class IronicNode(base.BaseMetalNode): hv_up_when_powered_off = True def __init__(self, ironic_node, nova_node, ironic_client): super().__init__(nova_node) self._ironic_client = ironic_client self._ironic_node = ironic_node def get_power_state(self): return POWER_STATES_MAP.get(self._ironic_node.power_state, metal_constants.PowerState.UNKNOWN) def get_id(self): return self._ironic_node.uuid def power_on(self): self._ironic_client.node.set_power_state(self.get_id(), "on") def power_off(self): self._ironic_client.node.set_power_state(self.get_id(), "off") class IronicHelper(base.BaseMetalHelper): @property def _client(self): if not getattr(self, "_cached_client", None): self._cached_client = self._osc.ironic() return self._cached_client def list_compute_nodes(self): out_list = [] # TODO(lpetrut): consider using "detailed=True" instead of making # an additional GET request per node node_list = self._client.node.list() for node in node_list: node_info = self._client.node.get(node.uuid) hypervisor_id = node_info.extra.get('compute_node_id', None) if hypervisor_id is None: LOG.warning('Cannot find compute_node_id in extra ' 'of ironic node %s', node.uuid) continue hypervisor_node = self.nova_client.hypervisors.get(hypervisor_id) if hypervisor_node is None: LOG.warning('Cannot find hypervisor %s', hypervisor_id) continue out_node = IronicNode(node, hypervisor_node, self._client) out_list.append(out_node) return out_list def get_node(self, node_id): ironic_node = self._client.node.get(node_id) compute_node_id = ironic_node.extra.get('compute_node_id') if compute_node_id: compute_node = self.nova_client.hypervisors.get(compute_node_id) else: compute_node = None return IronicNode(ironic_node, compute_node, self._client) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/metal_helper/maas.py0000664000175000017500000001045500000000000023543 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.common.metal_helper import base from watcher.common.metal_helper import constants as metal_constants from watcher.common import utils CONF = cfg.CONF LOG = log.getLogger(__name__) try: from maas.client import enum as maas_enum except ImportError: maas_enum = None class MaasNode(base.BaseMetalNode): hv_up_when_powered_off = False def __init__(self, maas_node, nova_node, maas_client): super().__init__(nova_node) self._maas_client = maas_client self._maas_node = maas_node def get_power_state(self): maas_state = utils.async_compat_call( self._maas_node.query_power_state, timeout=CONF.maas_client.timeout) # python-libmaas may not be available, so we'll avoid a global # variable. power_states_map = { maas_enum.PowerState.ON: metal_constants.PowerState.ON, maas_enum.PowerState.OFF: metal_constants.PowerState.OFF, maas_enum.PowerState.ERROR: metal_constants.PowerState.ERROR, maas_enum.PowerState.UNKNOWN: metal_constants.PowerState.UNKNOWN, } return power_states_map.get(maas_state, metal_constants.PowerState.UNKNOWN) def get_id(self): return self._maas_node.system_id def power_on(self): LOG.info("Powering on MAAS node: %s %s", self._maas_node.fqdn, self._maas_node.system_id) utils.async_compat_call( self._maas_node.power_on, timeout=CONF.maas_client.timeout) def power_off(self): LOG.info("Powering off MAAS node: %s %s", self._maas_node.fqdn, self._maas_node.system_id) utils.async_compat_call( self._maas_node.power_off, timeout=CONF.maas_client.timeout) class MaasHelper(base.BaseMetalHelper): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not maas_enum: raise exception.UnsupportedError( "MAAS client unavailable. 
Please install python-libmaas.") @property def _client(self): if not getattr(self, "_cached_client", None): self._cached_client = self._osc.maas() return self._cached_client def list_compute_nodes(self): out_list = [] node_list = utils.async_compat_call( self._client.machines.list, timeout=CONF.maas_client.timeout) compute_nodes = self.nova_client.hypervisors.list() compute_node_map = dict() for compute_node in compute_nodes: compute_node_map[compute_node.hypervisor_hostname] = compute_node for node in node_list: hypervisor_node = compute_node_map.get(node.fqdn) if not hypervisor_node: LOG.info('Cannot find hypervisor %s', node.fqdn) continue out_node = MaasNode(node, hypervisor_node, self._client) out_list.append(out_node) return out_list def _get_compute_node_by_hostname(self, hostname): compute_nodes = self.nova_client.hypervisors.search( hostname, detailed=True) for compute_node in compute_nodes: if compute_node.hypervisor_hostname == hostname: return compute_node def get_node(self, node_id): maas_node = utils.async_compat_call( self._client.machines.get, node_id, timeout=CONF.maas_client.timeout) compute_node = self._get_compute_node_by_hostname(maas_node.fqdn) return MaasNode(maas_node, compute_node, self._client) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/nova_helper.py0000664000175000017500000007173200000000000022470 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import time from novaclient import api_versions from oslo_log import log import glanceclient.exc as glexceptions import novaclient.exceptions as nvexceptions from watcher.common import clients from watcher.common import exception from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF class NovaHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.neutron = self.osc.neutron() self.cinder = self.osc.cinder() self.nova = self.osc.nova() self.glance = self.osc.glance() def get_compute_node_list(self): hypervisors = self.nova.hypervisors.list() # filter out baremetal nodes from hypervisors compute_nodes = [node for node in hypervisors if node.hypervisor_type != 'ironic'] return compute_nodes def get_compute_node_by_name(self, node_name, servers=False, detailed=False): """Search for a hypervisor (compute node) by hypervisor_hostname :param node_name: The hypervisor_hostname to search :param servers: If true, include information about servers per hypervisor :param detailed: If true, include information about the compute service per hypervisor (requires microversion 2.53) """ return self.nova.hypervisors.search(node_name, servers=servers, detailed=detailed) def get_compute_node_by_hostname(self, node_hostname): """Get compute node by hostname :param node_hostname: Compute service hostname :returns: novaclient.v2.hypervisors.Hypervisor object if found :raises: ComputeNodeNotFound if no hypervisor is found for the compute service hostname or there was an error communicating with nova """ try: # This is a fuzzy match on hypervisor_hostname so we could get back # more than one compute node. If so, match on the compute service # hostname. compute_nodes = self.get_compute_node_by_name( node_hostname, detailed=True) for cn in compute_nodes: if cn.service['host'] == node_hostname: return cn raise exception.ComputeNodeNotFound(name=node_hostname) except Exception as exc: LOG.exception(exc) raise exception.ComputeNodeNotFound(name=node_hostname) def get_compute_node_by_uuid(self, node_uuid): """Get compute node by uuid :param node_uuid: hypervisor id as uuid after microversion 2.53 :returns: novaclient.v2.hypervisors.Hypervisor object if found """ return self.nova.hypervisors.get(node_uuid) def get_instance_list(self, filters=None, marker=None, limit=-1): """List servers for all tenants with details. This always gets servers with the all_tenants=True filter. :param filters: Dict of additional filters (optional). :param marker: Get servers that appear later in the server list than that represented by this server id (optional). :param limit: Maximum number of servers to return (optional). If limit == -1, all servers will be returned, note that limit == -1 will have a performance penalty. 
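            (With limit == -1, novaclient keeps paging through the server
            list, issuing one API request per page.)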
For details, please see: https://bugs.launchpad.net/watcher/+bug/1834679 :returns: list of novaclient Server objects """ search_opts = {'all_tenants': True} if filters: search_opts.update(filters) return self.nova.servers.list(search_opts=search_opts, marker=marker, limit=limit) def get_instance_by_uuid(self, instance_uuid): return [instance for instance in self.nova.servers.list(search_opts={"all_tenants": True, "uuid": instance_uuid})] def get_instance_by_name(self, instance_name): return [instance for instance in self.nova.servers.list(search_opts={"all_tenants": True, "name": instance_name})] def get_instances_by_node(self, host): return [instance for instance in self.nova.servers.list(search_opts={"all_tenants": True, "host": host}, limit=-1)] def get_flavor_list(self): return self.nova.flavors.list(**{'is_public': None}) def get_service(self, service_id): return self.nova.services.find(id=service_id) def get_aggregate_list(self): return self.nova.aggregates.list() def get_aggregate_detail(self, aggregate_id): return self.nova.aggregates.get(aggregate_id) def get_availability_zone_list(self): return self.nova.availability_zones.list(detailed=True) def get_service_list(self): return self.nova.services.list(binary='nova-compute') def find_instance(self, instance_id): return self.nova.servers.get(instance_id) def confirm_resize(self, instance, previous_status, retry=60): instance.confirm_resize() instance = self.nova.servers.get(instance.id) while instance.status != previous_status and retry: instance = self.nova.servers.get(instance.id) retry -= 1 time.sleep(1) if instance.status == previous_status: return True else: LOG.debug("confirm resize failed for the " "instance %s", instance.id) return False def wait_for_volume_status(self, volume, status, timeout=60, poll_interval=1): """Wait until volume reaches given status. :param volume: volume resource :param status: expected status of volume :param timeout: timeout in seconds :param poll_interval: poll interval in seconds """ start_time = time.time() while time.time() - start_time < timeout: volume = self.cinder.volumes.get(volume.id) if volume.status == status: break time.sleep(poll_interval) else: raise Exception("Volume %s did not reach status %s after %d s" % (volume.id, status, timeout)) return volume.status == status def watcher_non_live_migrate_instance(self, instance_id, dest_hostname, retry=120): """This method migrates a given instance This method uses the Nova built-in migrate() action to do a migration of a given instance. For migrating a given dest_hostname, Nova API version must be 2.56 or higher. It returns True if the migration was successful, False otherwise. :param instance_id: the unique id of the instance to migrate. 
:param dest_hostname: the name of the destination compute node, if destination_node is None, nova scheduler choose the destination host """ LOG.debug( "Trying a cold migrate of instance '%s' ", instance_id) # Looking for the instance to migrate instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance %s not found !", instance_id) return False else: host_name = getattr(instance, "OS-EXT-SRV-ATTR:host") LOG.debug( "Instance %(instance)s found on host '%(host)s'.", {'instance': instance_id, 'host': host_name}) previous_status = getattr(instance, 'status') instance.migrate(host=dest_hostname) instance = self.nova.servers.get(instance_id) while (getattr(instance, 'status') not in ["VERIFY_RESIZE", "ERROR"] and retry): instance = self.nova.servers.get(instance.id) time.sleep(2) retry -= 1 new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host') if (host_name != new_hostname and instance.status == 'VERIFY_RESIZE'): if not self.confirm_resize(instance, previous_status): return False LOG.debug( "cold migration succeeded : " "instance %(instance)s is now on host '%(host)s'.", {'instance': instance_id, 'host': new_hostname}) return True else: LOG.debug( "cold migration for instance %s failed", instance_id) return False def resize_instance(self, instance_id, flavor, retry=120): """This method resizes given instance with specified flavor. This method uses the Nova built-in resize() action to do a resize of a given instance. It returns True if the resize was successful, False otherwise. :param instance_id: the unique id of the instance to resize. :param flavor: the name or ID of the flavor to resize to. """ LOG.debug( "Trying a resize of instance %(instance)s to " "flavor '%(flavor)s'", {'instance': instance_id, 'flavor': flavor}) # Looking for the instance to resize instance = self.find_instance(instance_id) flavor_id = None try: flavor_id = self.nova.flavors.get(flavor).id except nvexceptions.NotFound: flavor_id = [f.id for f in self.nova.flavors.list() if f.name == flavor][0] except nvexceptions.ClientException as e: LOG.debug("Nova client exception occurred while resizing " "instance %s. Exception: %s", instance_id, e) if not flavor_id: LOG.debug("Flavor not found: %s", flavor) return False if not instance: LOG.debug("Instance not found: %s", instance_id) return False instance_status = getattr(instance, 'OS-EXT-STS:vm_state') LOG.debug( "Instance %(id)s is in '%(status)s' status.", {'id': instance_id, 'status': instance_status}) instance.resize(flavor=flavor_id) while getattr(instance, 'OS-EXT-STS:vm_state') != 'resized' \ and retry: instance = self.nova.servers.get(instance.id) LOG.debug('Waiting the resize of %s to %s', instance, flavor_id) time.sleep(1) retry -= 1 instance_status = getattr(instance, 'status') if instance_status != 'VERIFY_RESIZE': return False instance.confirm_resize() LOG.debug("Resizing succeeded : instance %s is now on flavor " "'%s'.", instance_id, flavor_id) return True def live_migrate_instance(self, instance_id, dest_hostname, retry=120): """This method does a live migration of a given instance This method uses the Nova built-in live_migrate() action to do a live migration of a given instance. It returns True if the migration was successful, False otherwise. :param instance_id: the unique id of the instance to migrate. 
        :param dest_hostname: the name of the destination compute node; if
            dest_hostname is None, the nova scheduler chooses the
            destination host
        """
        LOG.debug(
            "Trying a live migration of instance %(instance)s",
            {'instance': instance_id})

        # Looking for the instance to migrate
        instance = self.find_instance(instance_id)
        if not instance:
            LOG.debug("Instance not found: %s", instance_id)
            return False
        else:
            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            LOG.debug(
                "Instance %(instance)s found on host '%(host)s'.",
                {'instance': instance_id, 'host': host_name})

            # From nova api version 2.25 (Mitaka release), the default value
            # of block_migration is None, which is mapped to 'auto'.
            instance.live_migrate(host=dest_hostname)

            instance = self.nova.servers.get(instance_id)

            # NOTE: If the destination host is not specified for the live
            # migration, let the nova scheduler choose the destination host.
            if dest_hostname is None:
                while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
                    instance = self.nova.servers.get(instance.id)
                    LOG.debug('Waiting the migration of %s', instance.id)
                    time.sleep(1)
                    retry -= 1
                new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')

                if host_name != new_hostname and instance.status == 'ACTIVE':
                    LOG.debug(
                        "Live migration succeeded : "
                        "instance %(instance)s is now on host '%(host)s'.",
                        {'instance': instance_id, 'host': new_hostname})
                    return True
                else:
                    return False

            while getattr(instance, 'OS-EXT-SRV-ATTR:host') != dest_hostname \
                    and retry:
                instance = self.nova.servers.get(instance.id)
                if not getattr(instance, 'OS-EXT-STS:task_state'):
                    LOG.debug("Instance task state: %s is null", instance_id)
                    break
                LOG.debug('Waiting the migration of %s to %s',
                          instance,
                          getattr(instance, 'OS-EXT-SRV-ATTR:host'))
                time.sleep(1)
                retry -= 1

            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            if host_name != dest_hostname:
                return False

            LOG.debug(
                "Live migration succeeded : "
                "instance %(instance)s is now on host '%(host)s'.",
                {'instance': instance_id, 'host': host_name})
            return True

    def abort_live_migrate(self, instance_id, source, destination, retry=240):
        LOG.debug("Aborting live migration of instance %s", instance_id)
        migration = self.get_running_migration(instance_id)
        if migration:
            migration_id = getattr(migration[0], "id")
            try:
                self.nova.server_migrations.live_migration_abort(
                    server=instance_id, migration=migration_id)
            except Exception as e:
                # Note: Does not return from here, as the abort request may
                # be rejected while the migration is still going on.
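                # The polling loop below still runs in that case; the final
                # host/status comparison decides whether the abort succeeded
                # or the migration completed anyway.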
LOG.exception(e) else: LOG.debug( "No running migrations found for instance %s", instance_id) while retry: instance = self.nova.servers.get(instance_id) if (getattr(instance, 'OS-EXT-STS:task_state') is None and getattr(instance, 'status') in ['ACTIVE', 'ERROR']): break time.sleep(2) retry -= 1 instance_host = getattr(instance, 'OS-EXT-SRV-ATTR:host') instance_status = getattr(instance, 'status') # Abort live migration successful, action is cancelled if instance_host == source and instance_status == 'ACTIVE': return True # Nova Unable to abort live migration, action is succeeded elif instance_host == destination and instance_status == 'ACTIVE': return False else: raise Exception("Live migration execution and abort both failed " "for the instance %s" % instance_id) def enable_service_nova_compute(self, hostname): if float(CONF.nova_client.api_version) < 2.53: status = self.nova.services.enable( host=hostname, binary='nova-compute').status == 'enabled' else: service_uuid = self.nova.services.list(host=hostname, binary='nova-compute')[0].id status = self.nova.services.enable( service_uuid=service_uuid).status == 'enabled' return status def disable_service_nova_compute(self, hostname, reason=None): if float(CONF.nova_client.api_version) < 2.53: status = self.nova.services.disable_log_reason( host=hostname, binary='nova-compute', reason=reason).status == 'disabled' else: service_uuid = self.nova.services.list(host=hostname, binary='nova-compute')[0].id status = self.nova.services.disable_log_reason( service_uuid=service_uuid, reason=reason).status == 'disabled' return status def create_image_from_instance(self, instance_id, image_name, metadata={"reason": "instance_migrate"}): """This method creates a new image from a given instance. It waits for this image to be in 'active' state before returning. It returns the unique UUID of the created image if successful, None otherwise. :param instance_id: the uniqueid of the instance to backup as an image. :param image_name: the name of the image to create. :param metadata: a dictionary containing the list of key-value pairs to associate to the image as metadata. 
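
        Example (illustrative only; ``helper`` is an assumed NovaHelper
        instance, not part of this module)::

            image_uuid = helper.create_image_from_instance(
                instance_id, "pre-migration-backup")
            if image_uuid is None:
                LOG.error("Backup image could not be created")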
""" LOG.debug( "Trying to create an image from instance %s ...", instance_id) # Looking for the instance instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return None else: host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') LOG.debug( "Instance %(instance)s found on host '%(host)s'.", {'instance': instance_id, 'host': host_name}) # We need to wait for an appropriate status # of the instance before we can build an image from it if self.wait_for_instance_status(instance, ('ACTIVE', 'SHUTOFF'), 5, 10): image_uuid = self.nova.servers.create_image(instance_id, image_name, metadata) image = self.glance.images.get(image_uuid) if not image: return None # Waiting for the new image to be officially in ACTIVE state # in order to make sure it can be used status = image.status retry = 10 while status != 'active' and status != 'error' and retry: time.sleep(5) retry -= 1 # Retrieve the instance again so the status field updates image = self.glance.images.get(image_uuid) if not image: break status = image.status LOG.debug("Current image status: %s", status) if not image: LOG.debug("Image not found: %s", image_uuid) else: LOG.debug( "Image %(image)s successfully created for " "instance %(instance)s", {'image': image_uuid, 'instance': instance_id}) return image_uuid return None def delete_instance(self, instance_id): """This method deletes a given instance. :param instance_id: the unique id of the instance to delete. """ LOG.debug("Trying to remove instance %s ...", instance_id) instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return False else: self.nova.servers.delete(instance_id) LOG.debug("Instance %s removed.", instance_id) return True def stop_instance(self, instance_id): """This method stops a given instance. :param instance_id: the unique id of the instance to stop. """ LOG.debug("Trying to stop instance %s ...", instance_id) instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return False elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped": LOG.debug("Instance has been stopped: %s", instance_id) return True else: self.nova.servers.stop(instance_id) if self.wait_for_instance_state(instance, "stopped", 8, 10): LOG.debug("Instance %s stopped.", instance_id) return True else: return False def wait_for_instance_state(self, server, state, retry, sleep): """Waits for server to be in a specific state The state can be one of the following : active, stopped :param server: server object. :param state: for which state we are waiting for :param retry: how many times to retry :param sleep: seconds to sleep between the retries """ if not server: return False while getattr(server, 'OS-EXT-STS:vm_state') != state and retry: time.sleep(sleep) server = self.nova.servers.get(server) retry -= 1 return getattr(server, 'OS-EXT-STS:vm_state') == state def wait_for_instance_status(self, instance, status_list, retry, sleep): """Waits for instance to be in a specific status The status can be one of the following : BUILD, ACTIVE, ERROR, VERIFY_RESIZE, SHUTOFF :param instance: instance object. 
:param status_list: tuple containing the list of status we are waiting for :param retry: how many times to retry :param sleep: seconds to sleep between the retries """ if not instance: return False while instance.status not in status_list and retry: LOG.debug("Current instance status: %s", instance.status) time.sleep(sleep) instance = self.nova.servers.get(instance.id) retry -= 1 LOG.debug("Current instance status: %s", instance.status) return instance.status in status_list def create_instance(self, node_id, inst_name="test", image_id=None, flavor_name="m1.tiny", sec_group_list=["default"], network_names_list=["demo-net"], keypair_name="mykeys", create_new_floating_ip=True, block_device_mapping_v2=None): """This method creates a new instance It also creates, if requested, a new floating IP and associates it with the new instance It returns the unique id of the created instance. """ LOG.debug( "Trying to create new instance '%(inst)s' " "from image '%(image)s' with flavor '%(flavor)s' ...", {'inst': inst_name, 'image': image_id, 'flavor': flavor_name}) try: self.nova.keypairs.findall(name=keypair_name) except nvexceptions.NotFound: LOG.debug("Key pair '%s' not found ", keypair_name) return try: image = self.glance.images.get(image_id) except glexceptions.NotFound: LOG.debug("Image '%s' not found ", image_id) return try: flavor = self.nova.flavors.find(name=flavor_name) except nvexceptions.NotFound: LOG.debug("Flavor '%s' not found ", flavor_name) return # Make sure all security groups exist for sec_group_name in sec_group_list: group_id = self.get_security_group_id_from_name(sec_group_name) if not group_id: LOG.debug("Security group '%s' not found ", sec_group_name) return net_list = list() for network_name in network_names_list: nic_id = self.get_network_id_from_name(network_name) if not nic_id: LOG.debug("Network '%s' not found ", network_name) return net_obj = {"net-id": nic_id} net_list.append(net_obj) # get availability zone of destination host azone = self.nova.services.list(host=node_id, binary='nova-compute')[0].zone instance = self.nova.servers.create( inst_name, image, flavor=flavor, key_name=keypair_name, security_groups=sec_group_list, nics=net_list, block_device_mapping_v2=block_device_mapping_v2, availability_zone="%s:%s" % (azone, node_id)) # Poll at 5 second intervals, until the status is no longer 'BUILD' if instance: if self.wait_for_instance_status(instance, ('ACTIVE', 'ERROR'), 5, 10): instance = self.nova.servers.get(instance.id) if create_new_floating_ip and instance.status == 'ACTIVE': LOG.debug( "Creating a new floating IP" " for instance '%s'", instance.id) # Creating floating IP for the new instance floating_ip = self.nova.floating_ips.create() instance.add_floating_ip(floating_ip) LOG.debug( "Instance %(instance)s associated to " "Floating IP '%(ip)s'", {'instance': instance.id, 'ip': floating_ip.ip}) return instance def get_security_group_id_from_name(self, group_name="default"): """This method returns the security group of the provided group name""" security_groups = self.neutron.list_security_groups(name=group_name) security_group_id = security_groups['security_groups'][0]['id'] return security_group_id def get_network_id_from_name(self, net_name="private"): """This method returns the unique id of the provided network name""" networks = self.neutron.list_networks(name=net_name) # LOG.debug(networks) network_id = networks['networks'][0]['id'] return network_id def get_hostname(self, instance): return str(getattr(instance, 'OS-EXT-SRV-ATTR:host')) def 
get_running_migration(self, instance_id):
        return self.nova.server_migrations.list(server=instance_id)

    def swap_volume(self, old_volume, new_volume,
                    retry=120, retry_interval=10):
        """Swap old_volume for new_volume"""
        attachments = old_volume.attachments
        instance_id = attachments[0]['server_id']
        # do volume update
        self.nova.volumes.update_server_volume(
            instance_id, old_volume.id, new_volume.id)
        while getattr(new_volume, 'status') != 'in-use' and retry:
            new_volume = self.cinder.volumes.get(new_volume.id)
            LOG.debug('Waiting volume update to %s', new_volume)
            time.sleep(retry_interval)
            retry -= 1
            LOG.debug("retry count: %s", retry)
        if getattr(new_volume, 'status') != "in-use":
            LOG.error("Volume update retry timeout or error")
            return False

        host_name = getattr(new_volume, "os-vol-host-attr:host")
        LOG.debug(
            "Volume update succeeded : "
            "Volume %s is now on host '%s'.", new_volume.id, host_name)
        return True

    def _check_nova_api_version(self, client, version):
        api_version = api_versions.APIVersion(version_str=version)
        try:
            api_versions.discover_version(client, api_version)
            return True
        except nvexceptions.UnsupportedVersion as e:
            LOG.exception(e)
            return False
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/paths.py0000664000175000017500000000223200000000000021272 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from watcher import conf

CONF = conf.CONF


def basedir_rel(*args):
    """Return a path relative to $pybasedir."""
    return os.path.join(CONF.pybasedir, *args)


def bindir_rel(*args):
    """Return a path relative to $bindir."""
    return os.path.join(CONF.bindir, *args)


def state_path_rel(*args):
    """Return a path relative to $state_path."""
    return os.path.join(CONF.state_path, *args)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/placement_helper.py0000664000175000017500000001477400000000000023500 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
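
# Minimal usage sketch of the helper defined below (illustrative only; the
# provider name 'compute-0' is an assumption for the example):
#
#     helper = PlacementHelper()
#     providers = helper.get_resource_providers(rp_name='compute-0')
#     if providers:
#         rp_uuid = providers[0]['uuid']
#         inventories = helper.get_inventories(rp_uuid)
#         usages = helper.get_usages_for_resource_provider(rp_uuid)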
from http import HTTPStatus

from oslo_config import cfg
from oslo_log import log as logging

from watcher.common import clients

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class PlacementHelper(object):

    def __init__(self, osc=None):
        """:param osc: an OpenStackClients instance"""
        self.osc = osc if osc else clients.OpenStackClients()
        self._placement = self.osc.placement()

    def get(self, url):
        return self._placement.get(url, raise_exc=False)

    @staticmethod
    def get_error_msg(resp):
        json_resp = resp.json()
        # https://docs.openstack.org/api-ref/placement/#errors
        if 'errors' in json_resp:
            error_msg = json_resp['errors'][0].get('detail')
        else:
            error_msg = resp.text
        return error_msg

    def get_resource_providers(self, rp_name=None):
        """Calls the placement API for a resource provider record.

        :param rp_name: Name of the resource provider, if None, list all
                        resource providers.
        :return: A list of resource providers information or None if the
                 resource provider doesn't exist.
        """
        url = '/resource_providers'
        if rp_name:
            url += '?name=%s' % rp_name
        resp = self.get(url)
        if resp.status_code == HTTPStatus.OK:
            json_resp = resp.json()
            return json_resp['resource_providers']
        if rp_name:
            msg = "Failed to get resource provider %(name)s. "
        else:
            msg = "Failed to get all resource providers. "
        msg += "Got %(status_code)d: %(err_text)s."
        args = {
            'name': rp_name,
            'status_code': resp.status_code,
            'err_text': self.get_error_msg(resp),
        }
        LOG.error(msg, args)

    def get_inventories(self, rp_uuid):
        """Calls the placement API to get resource inventory information.

        :param rp_uuid: UUID of the resource provider to get.
        :return: A dictionary of inventories keyed by resource classes.
        """
        url = '/resource_providers/%s/inventories' % rp_uuid
        resp = self.get(url)
        if resp.status_code == HTTPStatus.OK:
            json = resp.json()
            return json['inventories']
        msg = ("Failed to get resource provider %(rp_uuid)s inventories. "
               "Got %(status_code)d: %(err_text)s.")
        args = {
            'rp_uuid': rp_uuid,
            'status_code': resp.status_code,
            'err_text': self.get_error_msg(resp),
        }
        LOG.error(msg, args)

    def get_provider_traits(self, rp_uuid):
        """Queries the placement API for a resource provider's traits.

        :param rp_uuid: UUID of the resource provider to grab traits for.
        :return: A list of traits.
        """
        resp = self.get("/resource_providers/%s/traits" % rp_uuid)
        if resp.status_code == HTTPStatus.OK:
            json = resp.json()
            return json['traits']
        msg = ("Failed to get resource provider %(rp_uuid)s traits. "
               "Got %(status_code)d: %(err_text)s.")
        args = {
            'rp_uuid': rp_uuid,
            'status_code': resp.status_code,
            'err_text': self.get_error_msg(resp),
        }
        LOG.error(msg, args)

    def get_allocations_for_consumer(self, consumer_uuid):
        """Retrieves the allocations for a specific consumer.

        :param consumer_uuid: the UUID of the consumer resource.
        :return: A dictionary of allocation records keyed by resource
                 provider uuid.
        """
        url = '/allocations/%s' % consumer_uuid
        resp = self.get(url)
        if resp.status_code == HTTPStatus.OK:
            json = resp.json()
            return json['allocations']
        msg = ("Failed to get allocations for consumer %(c_uuid)s. "
               "Got %(status_code)d: %(err_text)s.")
        args = {
            'c_uuid': consumer_uuid,
            'status_code': resp.status_code,
            'err_text': self.get_error_msg(resp),
        }
        LOG.error(msg, args)

    def get_usages_for_resource_provider(self, rp_uuid):
        """Retrieves the usages for a specific provider.

        :param rp_uuid: The UUID of the provider.
        :return: A dictionary that describes how much each class of resource
                 is being consumed on this resource provider.
""" url = '/resource_providers/%s/usages' % rp_uuid resp = self.get(url) if resp.status_code == HTTPStatus.OK: json = resp.json() return json['usages'] msg = ("Failed to get resource provider %(rp_uuid)s usages. " "Got %(status_code)d: %(err_text)s.") args = { 'rp_uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } LOG.error(msg, args) def get_candidate_providers(self, resources): """Returns a dictionary of resource provider summaries. :param resources: A comma-separated list of strings indicating an amount of resource of a specified class that providers in each allocation request must collectively have the capacity and availability to serve: resources=VCPU:4,DISK_GB:64,MEMORY_MB:2048 :returns: A dict, keyed by resource provider UUID, which can provide the required resources. """ url = "/allocation_candidates?%s" % resources resp = self.get(url) if resp.status_code == HTTPStatus.OK: data = resp.json() return data['provider_summaries'] args = { 'resource_request': resources, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } msg = ("Failed to get allocation candidates from placement " "API for resources: %(resource_request)s\n" "Got %(status_code)d: %(err_text)s.") LOG.error(msg, args) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6271353 python_watcher-14.0.0/watcher/common/policies/0000775000175000017500000000000000000000000021411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/__init__.py0000664000175000017500000000255000000000000023524 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from watcher.common.policies import action from watcher.common.policies import action_plan from watcher.common.policies import audit from watcher.common.policies import audit_template from watcher.common.policies import base from watcher.common.policies import data_model from watcher.common.policies import goal from watcher.common.policies import scoring_engine from watcher.common.policies import service from watcher.common.policies import strategy def list_rules(): return itertools.chain( base.list_rules(), action.list_rules(), action_plan.list_rules(), audit.list_rules(), audit_template.list_rules(), data_model.list_rules(), goal.list_rules(), scoring_engine.list_rules(), service.list_rules(), strategy.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/action.py0000664000175000017500000000312100000000000023235 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from watcher.common.policies import base ACTION = 'action:%s' rules = [ policy.DocumentedRuleDefault( name=ACTION % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of actions with detail.', operations=[ { 'path': '/v1/actions/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION % 'get', check_str=base.RULE_ADMIN_API, description='Retrieve information about a given action.', operations=[ { 'path': '/v1/actions/{action_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION % 'get_all', check_str=base.RULE_ADMIN_API, description='Retrieve a list of all actions.', operations=[ { 'path': '/v1/actions', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/action_plan.py0000664000175000017500000000504500000000000024256 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
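
# These DocumentedRuleDefault entries are only defaults: a deployer can
# override any of them in watcher's policy file without code changes. An
# illustrative (not shipped) policy.yaml override relaxing one rule might
# look like:
#
#     "action:get": "role:admin or role:reader"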
from oslo_policy import policy

from watcher.common.policies import base

ACTION_PLAN = 'action_plan:%s'

rules = [
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'delete',
        check_str=base.RULE_ADMIN_API,
        description='Delete an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_uuid}',
                'method': 'DELETE'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'detail',
        check_str=base.RULE_ADMIN_API,
        description='Retrieve a list of action plans with detail.',
        operations=[
            {
                'path': '/v1/action_plans/detail',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'get',
        check_str=base.RULE_ADMIN_API,
        description='Get an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_id}',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'get_all',
        check_str=base.RULE_ADMIN_API,
        description='Get all action plans.',
        operations=[
            {
                'path': '/v1/action_plans',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'update',
        check_str=base.RULE_ADMIN_API,
        description='Update an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_uuid}',
                'method': 'PATCH'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'start',
        check_str=base.RULE_ADMIN_API,
        description='Start an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_uuid}/start',
                'method': 'POST'
            }
        ]
    )
]


def list_rules():
    return rules
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/audit.py0000664000175000017500000000460200000000000023073 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy from watcher.common.policies import base AUDIT = 'audit:%s' rules = [ policy.DocumentedRuleDefault( name=AUDIT % 'create', check_str=base.RULE_ADMIN_API, description='Create a new audit.', operations=[ { 'path': '/v1/audits', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'delete', check_str=base.RULE_ADMIN_API, description='Delete an audit.', operations=[ { 'path': '/v1/audits/{audit_uuid}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve audit list with details.', operations=[ { 'path': '/v1/audits/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'get', check_str=base.RULE_ADMIN_API, description='Get an audit.', operations=[ { 'path': '/v1/audits/{audit_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all audits.', operations=[ { 'path': '/v1/audits', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'update', check_str=base.RULE_ADMIN_API, description='Update an audit.', operations=[ { 'path': '/v1/audits/{audit_uuid}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/audit_template.py0000664000175000017500000000513600000000000024771 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base AUDIT_TEMPLATE = 'audit_template:%s' rules = [ policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'create', check_str=base.RULE_ADMIN_API, description='Create an audit template.', operations=[ { 'path': '/v1/audit_templates', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'delete', check_str=base.RULE_ADMIN_API, description='Delete an audit template.', operations=[ { 'path': '/v1/audit_templates/{audit_template_uuid}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of audit templates with details.', operations=[ { 'path': '/v1/audit_templates/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'get', check_str=base.RULE_ADMIN_API, description='Get an audit template.', operations=[ { 'path': '/v1/audit_templates/{audit_template_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'get_all', check_str=base.RULE_ADMIN_API, description='Get a list of all audit templates.', operations=[ { 'path': '/v1/audit_templates', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'update', check_str=base.RULE_ADMIN_API, description='Update an audit template.', operations=[ { 'path': '/v1/audit_templates/{audit_template_uuid}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/base.py0000664000175000017500000000165200000000000022701 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy RULE_ADMIN_API = 'rule:admin_api' ROLE_ADMIN_OR_ADMINISTRATOR = 'role:admin or role:administrator' ALWAYS_DENY = '!' rules = [ policy.RuleDefault( name='admin_api', check_str=ROLE_ADMIN_OR_ADMINISTRATOR ), policy.RuleDefault( name='show_password', check_str=ALWAYS_DENY ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/data_model.py0000664000175000017500000000203400000000000024053 0ustar00zuulzuul00000000000000# Copyright 2019 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
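
# How these defaults compose (hedged sketch; ``context`` and the empty
# target dict are assumptions for the example): check_str values such as
# base.RULE_ADMIN_API are references ('rule:admin_api') that oslo.policy
# resolves against the RuleDefaults registered in base.py, i.e.
# 'role:admin or role:administrator'.
#
#     from oslo_config import cfg
#     from oslo_policy import policy as oslo_policy
#     from watcher.common import policies
#
#     enforcer = oslo_policy.Enforcer(cfg.CONF)
#     enforcer.register_defaults(policies.list_rules())
#     allowed = enforcer.enforce('audit_template:create', {},
#                                context.to_dict())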
from oslo_policy import policy from watcher.common.policies import base DATA_MODEL = 'data_model:%s' rules = [ policy.DocumentedRuleDefault( name=DATA_MODEL % 'get_all', check_str=base.RULE_ADMIN_API, description='List data model.', operations=[ { 'path': '/v1/data_model', 'method': 'GET' } ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/goal.py0000664000175000017500000000301700000000000022706 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from watcher.common.policies import base GOAL = 'goal:%s' rules = [ policy.DocumentedRuleDefault( name=GOAL % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of goals with detail.', operations=[ { 'path': '/v1/goals/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=GOAL % 'get', check_str=base.RULE_ADMIN_API, description='Get a goal.', operations=[ { 'path': '/v1/goals/{goal_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=GOAL % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all goals.', operations=[ { 'path': '/v1/goals', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/scoring_engine.py0000664000175000017500000000413000000000000024752 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from watcher.common.policies import base SCORING_ENGINE = 'scoring_engine:%s' rules = [ # FIXME(lbragstad): Find someone from watcher to double check this # information. This API isn't listed in watcher's API reference # documentation. policy.DocumentedRuleDefault( name=SCORING_ENGINE % 'detail', check_str=base.RULE_ADMIN_API, description='List scoring engines with details.', operations=[ { 'path': '/v1/scoring_engines/detail', 'method': 'GET' } ] ), # FIXME(lbragstad): Find someone from watcher to double check this # information. This API isn't listed in watcher's API reference # documentation. policy.DocumentedRuleDefault( name=SCORING_ENGINE % 'get', check_str=base.RULE_ADMIN_API, description='Get a scoring engine.', operations=[ { 'path': '/v1/scoring_engines/{scoring_engine_id}', 'method': 'GET' } ] ), # FIXME(lbragstad): Find someone from watcher to double check this # information. 
This API isn't listed in watcher's API reference # documentation. policy.DocumentedRuleDefault( name=SCORING_ENGINE % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all scoring engines.', operations=[ { 'path': '/v1/scoring_engines', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/service.py0000664000175000017500000000305100000000000023422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from watcher.common.policies import base SERVICE = 'service:%s' rules = [ policy.DocumentedRuleDefault( name=SERVICE % 'detail', check_str=base.RULE_ADMIN_API, description='List services with detail.', operations=[ { 'path': '/v1/services/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=SERVICE % 'get', check_str=base.RULE_ADMIN_API, description='Get a specific service.', operations=[ { 'path': '/v1/services/{service_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=SERVICE % 'get_all', check_str=base.RULE_ADMIN_API, description='List all services.', operations=[ { 'path': '/v1/services/', 'method': 'GET' } ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policies/strategy.py0000664000175000017500000000356000000000000023631 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
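
# At the API layer these admin-only defaults are checked through the
# enforce() helper defined in watcher/common/policy.py below; a hedged
# sketch of a typical call, where ``context`` is an assumed request
# context object:
#
#     from watcher.common import policy
#     policy.enforce(context, 'strategy:get_all', do_raise=True)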
from oslo_policy import policy from watcher.common.policies import base STRATEGY = 'strategy:%s' rules = [ policy.DocumentedRuleDefault( name=STRATEGY % 'detail', check_str=base.RULE_ADMIN_API, description='List strategies with detail.', operations=[ { 'path': '/v1/strategies/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=STRATEGY % 'get', check_str=base.RULE_ADMIN_API, description='Get a strategy.', operations=[ { 'path': '/v1/strategies/{strategy_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=STRATEGY % 'get_all', check_str=base.RULE_ADMIN_API, description='List all strategies.', operations=[ { 'path': '/v1/strategies', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=STRATEGY % 'state', check_str=base.RULE_ADMIN_API, description='Get state of strategy.', operations=[ { 'path': '/v1/strategies{strategy_uuid}/state', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/policy.py0000664000175000017500000001143500000000000021457 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Watcher.""" import sys from oslo_config import cfg from oslo_policy import policy from watcher.common import exception from watcher.common import policies _ENFORCER = None CONF = cfg.CONF # we can get a policy enforcer by this init. # oslo policy support change policy rule dynamically. # at present, policy.enforce will reload the policy rules when it checks # the policy files have been touched. def init(policy_file=None, rules=None, default_rule=None, use_conf=True, overwrite=True): """Init an Enforcer class. :param policy_file: Custom policy file to use, if none is specified, ``conf.policy_file`` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. If :meth:`load_rules` with ``force_reload=True``, :meth:`clear` or :meth:`set_rules` with ``overwrite=True`` is called this will be overwritten. :param default_rule: Default rule to use, conf.default_rule will be used if none is specified. :param use_conf: Whether to load rules from cache or config file. :param overwrite: Whether to overwrite existing rules when reload rules from config file. """ global _ENFORCER if not _ENFORCER: # https://docs.openstack.org/oslo.policy/latest/admin/index.html _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf, overwrite=overwrite) _ENFORCER.register_defaults(policies.list_rules()) return _ENFORCER def enforce(context, rule=None, target=None, do_raise=True, exc=None, *args, **kwargs): """Checks authorization of a rule against the target and credentials. :param dict context: As much information about the user performing the action as possible. :param rule: The rule to evaluate. 
:param dict target: As much information about the object being operated on as possible. :param do_raise: Whether to raise an exception or not if check fails. :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to :meth:`enforce` (both positional and keyword arguments) will be passed to the exception class. If not specified, :class:`PolicyNotAuthorized` will be used. :return: ``False`` if the policy does not allow the action and `exc` is not provided; otherwise, returns a value that evaluates to ``True``. Note: for rules using the "case" expression, this ``True`` value will be the specified string from the expression. """ enforcer = init() credentials = context.to_dict() if not exc: exc = exception.PolicyNotAuthorized if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} return enforcer.enforce(rule, target, credentials, do_raise=do_raise, exc=exc, *args, **kwargs) def get_enforcer(): # This method is for use by oslopolicy CLI scripts. Those scripts need the # 'output-file' and 'namespace' options, but having those in sys.argv means # loading the Watcher config options will fail as those are not expected # to be present. So we pass in an arg list with those stripped out. conf_args = [] # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] i = 1 while i < len(sys.argv): if sys.argv[i].strip('-') in ['namespace', 'output-file']: i += 2 continue conf_args.append(sys.argv[i]) i += 1 cfg.CONF(conf_args, project='watcher') init() return _ENFORCER ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/rpc.py0000664000175000017500000001047700000000000020751 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
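
# Typical lifecycle of this module (hedged sketch; the topic name and the
# endpoint object are assumptions for illustration):
#
#     import oslo_messaging as messaging
#     from watcher.common import rpc
#
#     rpc.init(CONF)  # build the RPC/notification transports and notifier
#     target = messaging.Target(topic='watcher.decision.engine')
#     client = rpc.get_client(target)
#     server = rpc.get_server(target, endpoints=[SomeEndpoint()])
#     server.start()
#     ...
#     rpc.cleanup()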
from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from watcher.common import context as watcher_context from watcher.common import exception __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] CONF = cfg.CONF LOG = log.getLogger(__name__) TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ exception.__name__, ] EXTRA_EXMODS = [] JsonPayloadSerializer = messaging.JsonPayloadSerializer def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport( conf, allowed_remote_exmods=exmods) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) serializer = RequestContextSerializer(JsonPayloadSerializer()) if not conf.notification_level: NOTIFIER = messaging.Notifier( NOTIFICATION_TRANSPORT, serializer=serializer, driver='noop') else: NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) def initialized(): return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER if NOTIFIER is None: LOG.exception("RPC cleanup: NOTIFIER is None") TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return watcher_context.RequestContext.from_dict(context) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.get_rpc_client( TRANSPORT, target, version_cap=version_cap, serializer=serializer ) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None access_policy = dispatcher.DefaultRPCAccessPolicy serializer = RequestContextSerializer(serializer) return messaging.get_rpc_server( TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer, access_policy=access_policy ) def get_notification_listener(targets, endpoints, serializer=None, pool=None): assert NOTIFICATION_TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.get_notification_listener( NOTIFICATION_TRANSPORT, targets, endpoints, allow_requeue=False, executor='eventlet', pool=pool, serializer=serializer ) def get_notifier(publisher_id): assert NOTIFIER is not None return NOTIFIER.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/scheduling.py0000664000175000017500000000507300000000000022306 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # 
Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet from apscheduler import events from apscheduler.executors import pool as pool_executor from apscheduler.schedulers import background import futurist from oslo_service import service from watcher import eventlet as eventlet_helper job_events = events class GreenThreadPoolExecutor(pool_executor.BasePoolExecutor): """Green thread pool An executor that runs jobs in a green thread pool. Plugin alias: ``threadpool`` :param max_workers: the maximum number of spawned threads. """ def __init__(self, max_workers=10): pool = futurist.GreenThreadPoolExecutor(int(max_workers)) super(GreenThreadPoolExecutor, self).__init__(pool) executors = { 'default': GreenThreadPoolExecutor(), } class BackgroundSchedulerService( service.ServiceBase, background.BackgroundScheduler): def __init__(self, gconfig=None, **options): self.should_patch = eventlet_helper.is_patched() if options is None: options = {'executors': executors} else: if 'executors' not in options.keys(): options['executors'] = executors super().__init__(gconfig or {}, **options) def _main_loop(self): if self.should_patch: # NOTE(sean-k-mooney): is_patched and monkey_patch form # watcher.eventlet check a non thread local variable to early out # as we do not use eventlet_helper.patch() here to ensure # eventlet.monkey_patch() is actually called. eventlet.monkey_patch() super()._main_loop() def start(self): """Start service.""" background.BackgroundScheduler.start(self) def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/service.py0000664000175000017500000002301400000000000021614 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 eNovance ## # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
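
# Hedged usage sketch for the scheduler service defined above in
# scheduling.py (``send_beat`` is an assumed callable; the 60-second
# interval mirrors how ServiceHeartbeat below wires its periodic beat):
#
#     import datetime
#     from watcher.common import scheduling
#
#     scheduler = scheduling.BackgroundSchedulerService()
#     scheduler.add_job(send_beat, 'interval', seconds=60,
#                       next_run_time=datetime.datetime.now())
#     scheduler.start()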
import datetime import socket from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import _options from oslo_log import log import oslo_messaging as messaging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import service from oslo_service import wsgi from oslo_utils import timeutils from watcher._i18n import _ from watcher.api import app from watcher.common import config from watcher.common import context from watcher.common import rpc from watcher.common import scheduling from watcher.conf import plugins as plugins_conf from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields from watcher import version NOTIFICATION_OPTS = [ cfg.StrOpt('notification_level', choices=[''] + list(wfields.NotificationPriority.ALL), default=wfields.NotificationPriority.INFO, help=_('Specifies the minimum level for which to send ' 'notifications. If not set, no notifications will ' 'be sent. The default is for this option to be at the ' '`INFO` level.')) ] cfg.CONF.register_opts(NOTIFICATION_OPTS) CONF = cfg.CONF LOG = log.getLogger(__name__) _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'qpid.messaging=INFO', 'oslo.messaging=INFO', 'sqlalchemy=WARN', 'keystoneclient=INFO', 'stevedore=INFO', 'eventlet.wsgi.server=WARN', 'iso8601=WARN', 'requests=WARN', 'neutronclient=WARN', 'glanceclient=WARN', 'apscheduler=WARN'] Singleton = service.Singleton class WSGIService(service.ServiceBase): """Provides ability to launch Watcher API from wsgi app.""" def __init__(self, service_name, use_ssl=False): """Initialize, but do not start the WSGI server. :param service_name: The service name of the WSGI server. :param use_ssl: Wraps the socket in an SSL context if True. 
""" self.service_name = service_name self.app = app.VersionSelectorApplication() self.workers = (CONF.api.workers or processutils.get_worker_count()) self.server = wsgi.Server(CONF, self.service_name, self.app, host=CONF.api.host, port=CONF.api.port, use_ssl=use_ssl, logger_name=self.service_name) def start(self): """Start serving this service using loaded configuration""" self.server.start() def stop(self): """Stop serving this API""" self.server.stop() def wait(self): """Wait for the service to stop serving this API""" self.server.wait() def reset(self): """Reset server greenpool size to default""" self.server.reset() class ServiceHeartbeat(scheduling.BackgroundSchedulerService): service_name = None def __init__(self, gconfig=None, service_name=None, **kwargs): gconfig = None or {} super(ServiceHeartbeat, self).__init__(gconfig, **kwargs) ServiceHeartbeat.service_name = service_name self.context = context.make_context() self.send_beat() def send_beat(self): host = CONF.host watcher_list = objects.Service.list( self.context, filters={'name': ServiceHeartbeat.service_name, 'host': host}) if watcher_list: watcher_service = watcher_list[0] watcher_service.last_seen_up = timeutils.utcnow() watcher_service.save() else: watcher_service = objects.Service(self.context) watcher_service.name = ServiceHeartbeat.service_name watcher_service.host = host watcher_service.create() def add_heartbeat_job(self): self.add_job(self.send_beat, 'interval', seconds=60, next_run_time=datetime.datetime.now()) @classmethod def get_service_name(cls): return CONF.host, cls.service_name def start(self): """Start service.""" self.add_heartbeat_job() super(ServiceHeartbeat, self).start() def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. 
""" class Service(service.ServiceBase): API_VERSION = '1.0' def __init__(self, manager_class): super(Service, self).__init__() self.manager = manager_class() self.publisher_id = self.manager.publisher_id self.api_version = self.manager.api_version self.conductor_topic = self.manager.conductor_topic self.notification_topics = self.manager.notification_topics self.heartbeat = None self.service_name = self.manager.service_name if self.service_name: self.heartbeat = ServiceHeartbeat( service_name=self.manager.service_name) self.conductor_endpoints = [ ep(self) for ep in self.manager.conductor_endpoints ] self.notification_endpoints = self.manager.notification_endpoints self._conductor_client = None self.conductor_topic_handler = None self.notification_handler = None if self.conductor_topic and self.conductor_endpoints: self.conductor_topic_handler = self.build_topic_handler( self.conductor_topic, self.conductor_endpoints) if self.notification_topics and self.notification_endpoints: self.notification_handler = self.build_notification_handler( self.notification_topics, self.notification_endpoints ) @property def conductor_client(self): if self._conductor_client is None: target = messaging.Target( topic=self.conductor_topic, version=self.API_VERSION, ) self._conductor_client = rpc.get_client( target, serializer=base.WatcherObjectSerializer() ) return self._conductor_client @conductor_client.setter def conductor_client(self, c): self.conductor_client = c def build_topic_handler(self, topic_name, endpoints=()): target = messaging.Target( topic=topic_name, # For compatibility, we can override it with 'host' opt server=CONF.host or socket.gethostname(), version=self.api_version, ) return rpc.get_server( target, endpoints, serializer=rpc.JsonPayloadSerializer() ) def build_notification_handler(self, topic_names, endpoints=()): targets = [] for topic in topic_names: kwargs = {} if '.' 
in topic: exchange, topic = topic.split('.', 1) kwargs['exchange'] = exchange kwargs['topic'] = topic targets.append(messaging.Target(**kwargs)) return rpc.get_notification_listener( targets, endpoints, serializer=rpc.JsonPayloadSerializer(), pool=CONF.host ) def start(self): LOG.debug("Connecting to '%s'", CONF.transport_url) if self.conductor_topic_handler: self.conductor_topic_handler.start() if self.notification_handler: self.notification_handler.start() if self.heartbeat: self.heartbeat.start() def stop(self): LOG.debug("Disconnecting from '%s'", CONF.transport_url) if self.conductor_topic_handler: self.conductor_topic_handler.stop() if self.notification_handler: self.notification_handler.stop() if self.heartbeat: self.heartbeat.stop() def reset(self): """Reset a service in case it received a SIGHUP.""" def wait(self): """Wait for service to complete.""" def check_api_version(self, ctx): api_manager_version = self.conductor_client.call( ctx, 'check_api_version', api_version=self.api_version) return api_manager_version def launch(conf, service_, workers=1, restart_method='mutate'): return service.launch(conf, service_, workers, restart_method) def prepare_service(argv=(), conf=cfg.CONF): log.register_options(conf) gmr_opts.set_defaults(conf) config.parse_args(argv) cfg.set_defaults(_options.log_opts, default_log_levels=_DEFAULT_LOG_LEVELS) log.setup(conf, 'python-watcher') conf.log_opt_values(LOG, log.DEBUG) objects.register_all() gmr.TextGuruMeditation.register_section( _('Plugins'), plugins_conf.show_plugins) gmr.TextGuruMeditation.setup_autorun(version, conf=conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/service_manager.py0000664000175000017500000000261500000000000023312 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc class ServiceManager(object, metaclass=abc.ABCMeta): @property @abc.abstractmethod def service_name(self): raise NotImplementedError() @property @abc.abstractmethod def api_version(self): raise NotImplementedError() @property @abc.abstractmethod def publisher_id(self): raise NotImplementedError() @property @abc.abstractmethod def conductor_topic(self): raise NotImplementedError() @property @abc.abstractmethod def notification_topics(self): raise NotImplementedError() @property @abc.abstractmethod def conductor_endpoints(self): raise NotImplementedError() @property @abc.abstractmethod def notification_endpoints(self): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/common/utils.py0000664000175000017500000001405600000000000021322 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities and helper functions.""" import asyncio import datetime import inspect import random import re import string from croniter import croniter import eventlet from eventlet import tpool from jsonschema import validators from oslo_config import cfg from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils from watcher.common import exception CONF = cfg.CONF LOG = log.getLogger(__name__) class Struct(dict): """Specialized dict where you access an item like an attribute >>> struct = Struct() >>> struct['a'] = 1 >>> struct.b = 2 >>> assert struct.a == 1 >>> assert struct['b'] == 2 """ def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): try: self[name] = value except KeyError: raise AttributeError(name) generate_uuid = uuidutils.generate_uuid is_uuid_like = uuidutils.is_uuid_like is_int_like = strutils.is_int_like def is_cron_like(value): """Return True if the submitted value is valid cron syntax""" try: croniter(value, datetime.datetime.now()) except Exception as e: raise exception.CronFormatIsInvalid(message=str(e)) return True def safe_rstrip(value, chars=None): """Removes trailing characters from a string if that does not make it empty :param value: A string value that will be stripped. :param chars: Characters to remove. :return: Stripped value. """ if not isinstance(value, str): LOG.warning( "Failed to remove trailing character. Returning original object. " "Supplied object is not a string: %s", value) return value return value.rstrip(chars) or value def is_hostname_safe(hostname): """Determine if the supplied hostname is RFC compliant. Check that the supplied hostname conforms to: * http://en.wikipedia.org/wiki/Hostname * http://tools.ietf.org/html/rfc952 * http://tools.ietf.org/html/rfc1123 :param hostname: The hostname to be validated. :returns: True if valid. False if not. """ m = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$' return (isinstance(hostname, str) and (re.match(m, hostname) is not None)) def get_cls_import_path(cls): """Return the import path of a given class""" module = cls.__module__ if module is None or module == str.__module__: return cls.__name__ return module + '.' 
+ cls.__name__ # Default value feedback extension as jsonschema doesn't support it def extend_with_default(validator_class): validate_properties = validator_class.VALIDATORS["properties"] def set_defaults(validator, properties, instance, schema): for prop, subschema in properties.items(): if "default" in subschema and instance is not None: instance.setdefault(prop, subschema["default"]) for error in validate_properties( validator, properties, instance, schema ): yield error return validators.extend(validator_class, {"properties": set_defaults}) # Parameter strict check extension as jsonschema doesn't support it def extend_with_strict_schema(validator_class): validate_properties = validator_class.VALIDATORS["properties"] def strict_schema(validator, properties, instance, schema): if instance is None: return for para in instance.keys(): if para not in properties.keys(): raise exception.AuditParameterNotAllowed(parameter=para) for error in validate_properties( validator, properties, instance, schema ): yield error return validators.extend(validator_class, {"properties": strict_schema}) StrictDefaultValidatingDraft4Validator = extend_with_default( extend_with_strict_schema(validators.Draft4Validator)) Draft4Validator = validators.Draft4Validator def random_string(n): return ''.join([random.choice( string.ascii_letters + string.digits) for i in range(n)]) # Some clients (e.g. MAAS) use asyncio, which isn't compatible with Eventlet. # As a workaround, we're delegating such calls to a native thread. def async_compat_call(f, *args, **kwargs): timeout = kwargs.pop('timeout', None) async def async_wrapper(): ret = f(*args, **kwargs) if inspect.isawaitable(ret): return await asyncio.wait_for(ret, timeout) return ret def tpool_wrapper(): # This will run in a separate native thread. Ideally, there should be # a single thread permanently running an asyncio loop, but for # convenience we'll use eventlet.tpool, which leverages a thread pool. # # That being considered, we're setting up a temporary asyncio loop to # handle this call. loop = asyncio.new_event_loop() try: asyncio.set_event_loop(loop) return loop.run_until_complete(async_wrapper()) finally: loop.close() # We'll use eventlet timeouts as an extra precaution and asyncio timeouts # to avoid lingering threads. For consistency, we'll convert eventlet # timeout exceptions to asyncio timeout errors. with eventlet.timeout.Timeout( seconds=timeout, exception=asyncio.TimeoutError("Timeout: %ss" % timeout)): return tpool.execute(tpool_wrapper) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/conf/0000775000175000017500000000000000000000000017237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/__init__.py0000664000175000017500000000455100000000000021355 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.conf import api from watcher.conf import applier from watcher.conf import cinder_client from watcher.conf import clients_auth from watcher.conf import collector from watcher.conf import datasources from watcher.conf import db from watcher.conf import decision_engine from watcher.conf import exception from watcher.conf import glance_client from watcher.conf import gnocchi_client from watcher.conf import grafana_client from watcher.conf import grafana_translators from watcher.conf import ironic_client from watcher.conf import keystone_client from watcher.conf import maas_client from watcher.conf import monasca_client from watcher.conf import neutron_client from watcher.conf import nova_client from watcher.conf import paths from watcher.conf import placement_client from watcher.conf import planner from watcher.conf import prometheus_client from watcher.conf import service CONF = cfg.CONF service.register_opts(CONF) api.register_opts(CONF) paths.register_opts(CONF) exception.register_opts(CONF) datasources.register_opts(CONF) db.register_opts(CONF) planner.register_opts(CONF) applier.register_opts(CONF) decision_engine.register_opts(CONF) maas_client.register_opts(CONF) monasca_client.register_opts(CONF) nova_client.register_opts(CONF) glance_client.register_opts(CONF) gnocchi_client.register_opts(CONF) keystone_client.register_opts(CONF) grafana_client.register_opts(CONF) grafana_translators.register_opts(CONF) cinder_client.register_opts(CONF) neutron_client.register_opts(CONF) clients_auth.register_opts(CONF) ironic_client.register_opts(CONF) collector.register_opts(CONF) placement_client.register_opts(CONF) prometheus_client.register_opts(CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/api.py0000664000175000017500000000535000000000000020365 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg api = cfg.OptGroup(name='api', title='Options for the Watcher API service') AUTH_OPTS = [ cfg.BoolOpt('enable_authentication', default=True, help='This option enables or disables user authentication ' 'via keystone. Default value is True.'), ] API_SERVICE_OPTS = [ cfg.PortOpt('port', default=9322, help='The port for the watcher API server'), cfg.HostAddressOpt('host', default='127.0.0.1', help='The listen IP address for the watcher API server' ), cfg.IntOpt('max_limit', default=1000, help='The maximum number of items returned in a single ' 'response from a collection resource'), cfg.IntOpt('workers', min=1, help='Number of workers for Watcher API service. 
' 'The default is equal to the number of CPUs available ' 'if that can be determined, otherwise a default worker ' 'count of 1 is used.'), cfg.BoolOpt('enable_ssl_api', default=False, help="Enable the integrated stand-alone API to service " "requests via HTTPS instead of HTTP. If there is a " "front-end service performing HTTPS offloading from " "the service, this option should be False; note, you " "will want to change public API endpoint to represent " "SSL termination URL with 'public_endpoint' option."), cfg.BoolOpt('enable_webhooks_auth', default=True, help='This option enables or disables webhook request ' 'authentication via keystone. Default value is True.'), ] def register_opts(conf): conf.register_group(api) conf.register_opts(API_SERVICE_OPTS, group=api) conf.register_opts(AUTH_OPTS) def list_opts(): return [(api, API_SERVICE_OPTS), ('DEFAULT', AUTH_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/applier.py0000664000175000017500000000423700000000000021253 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg watcher_applier = cfg.OptGroup(name='watcher_applier', title='Options for the Applier messaging ' 'core') APPLIER_MANAGER_OPTS = [ cfg.IntOpt('workers', default=1, min=1, required=True, help='Number of workers for applier, default value is 1.'), cfg.StrOpt('conductor_topic', default='watcher.applier.control', help='The topic name used for ' 'control events; this topic ' 'is used for RPC calls.'), cfg.StrOpt('publisher_id', default='watcher.applier.api', help='The identifier used by the Watcher ' 'module on the message broker'), cfg.StrOpt('workflow_engine', default='taskflow', required=True, help='Select the engine to use to execute the workflow'), ] APPLIER_OPTS = [ cfg.BoolOpt('rollback_when_actionplan_failed', default=False, help='If set to True, a failed action plan will be rolled ' 'back during execution. Default value is False.'), ] def register_opts(conf): conf.register_group(watcher_applier) conf.register_opts(APPLIER_MANAGER_OPTS, group=watcher_applier) conf.register_opts(APPLIER_OPTS, group=watcher_applier) def list_opts(): return [(watcher_applier, APPLIER_MANAGER_OPTS), (watcher_applier, APPLIER_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/cinder_client.py0000664000175000017500000000307100000000000022414 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg cinder_client = cfg.OptGroup(name='cinder_client', title='Configuration Options for Cinder') CINDER_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='3', help='Version of Cinder API to use in cinderclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in cinderclient. ' 'Supported values: internalURL, publicURL, adminURL. ' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(cinder_client) conf.register_opts(CINDER_CLIENT_OPTS, group=cinder_client) def list_opts(): return [(cinder_client, CINDER_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/clients_auth.py0000664000175000017500000000207500000000000022277 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading as ka_loading WATCHER_CLIENTS_AUTH = 'watcher_clients_auth' def register_opts(conf): ka_loading.register_session_conf_options(conf, WATCHER_CLIENTS_AUTH) ka_loading.register_auth_conf_options(conf, WATCHER_CLIENTS_AUTH) def list_opts(): return [(WATCHER_CLIENTS_AUTH, ka_loading.get_session_conf_options() + ka_loading.get_auth_common_conf_options())] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/collector.py0000664000175000017500000000333100000000000021577 0ustar00zuulzuul00000000000000# Copyright (c) 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg collector = cfg.OptGroup(name='collector', title='Defines the parameters of ' 'the module model collectors') COLLECTOR_OPTS = [ cfg.ListOpt('collector_plugins', default=['compute'], help=""" The cluster data model plugin names. 
Supported in-tree collectors include: * ``compute`` - data model collector for nova * ``storage`` - data model collector for cinder * ``baremetal`` - data model collector for ironic Custom data model collector plugins can be defined with the ``watcher_cluster_data_model_collectors`` extension point. """), cfg.IntOpt('api_call_retries', default=10, help="Number of retries before giving up on external service " "calls."), cfg.IntOpt('api_query_timeout', default=1, help="Time (in seconds) to wait before retrying a failed call " "to an external service.") ] def register_opts(conf): conf.register_group(collector) conf.register_opts(COLLECTOR_OPTS, group=collector) def list_opts(): return [(collector, COLLECTOR_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/datasources.py0000664000175000017500000000426100000000000022131 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.decision_engine.datasources import manager datasources = cfg.OptGroup(name='watcher_datasources', title='Configuration Options for watcher' ' datasources') possible_datasources = list(manager.DataSourceManager.metric_map.keys()) DATASOURCES_OPTS = [ cfg.ListOpt("datasources", help="Datasources to use in order to query the needed metrics." " If one of the strategy's metrics is not available in the first" " datasource, the next datasource will be chosen. This is" " the default for all strategies unless a strategy has a" " specific override.", item_type=cfg.types.String(choices=possible_datasources), default=possible_datasources), cfg.IntOpt('query_max_retries', min=1, default=10, mutable=True, help='How many times Watcher will retry a failed query', deprecated_group="gnocchi_client"), cfg.IntOpt('query_timeout', min=0, default=1, mutable=True, help='How many seconds Watcher should wait before retrying a query', deprecated_group="gnocchi_client") ] def register_opts(conf): conf.register_group(datasources) conf.register_opts(DATASOURCES_OPTS, group=datasources) def list_opts(): return [(datasources, DATASOURCES_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/db.py0000664000175000017500000000250000000000000020173 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_db import options as oslo_db_options from watcher.conf import paths _DEFAULT_SQL_CONNECTION = 'sqlite:///{0}'.format( paths.state_path_def('watcher.sqlite')) database = cfg.OptGroup(name='database', title='Configuration Options for database') SQL_OPTS = [ cfg.StrOpt('mysql_engine', default='InnoDB', help='MySQL engine to use.') ] def register_opts(conf): oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) conf.register_group(database) conf.register_opts(SQL_OPTS, group=database) def list_opts(): return [(database, SQL_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/decision_engine.py0000664000175000017500000000773000000000000022742 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg watcher_decision_engine = cfg.OptGroup(name='watcher_decision_engine', title='Defines the parameters of ' 'the module decision engine') WATCHER_DECISION_ENGINE_OPTS = [ cfg.StrOpt('conductor_topic', default='watcher.decision.control', help='The topic name used for ' 'control events; this topic ' 'is used for RPC calls'), cfg.ListOpt('notification_topics', default=['nova.versioned_notifications', 'watcher.watcher_notifications'], help='The exchange and topic names from which ' 'notification events will be listened to. ' 'The exchange should be specified to ' 'enable the use of pools.'), cfg.StrOpt('publisher_id', default='watcher.decision.api', help='The identifier used by the Watcher ' 'module on the message broker'), cfg.IntOpt('max_audit_workers', default=2, required=True, help='The maximum number of threads that can be used to ' 'execute audits in parallel.'), cfg.IntOpt('max_general_workers', default=4, required=True, help='The maximum number of threads that can be used to ' 'execute general tasks in parallel. The number of general ' 'workers does not scale with the number of ' 'audit workers!'), cfg.IntOpt('action_plan_expiry', default=24, mutable=True, help='An expiry timespan (hours). Watcher invalidates any ' 'action plan whose creation time, offset by this ' 'number of hours, is older than the current time.'), cfg.IntOpt('check_periodic_interval', default=30 * 60, mutable=True, help='Interval (in seconds) for checking action plan expiry.'), cfg.StrOpt('metric_map_path', default='/etc/watcher/metric_map.yaml', help='Path to metric map yaml formatted file. ' ' ' 'The file contains a map per datasource whose keys ' 'are the metric names as recognized by watcher and the ' 'value is the real name of the metric in the datasource. 
' 'For example:: \n\n' ' monasca:\n' ' instance_cpu_usage: VM_CPU\n' ' gnocchi:\n' ' instance_cpu_usage: cpu_vm_util\n\n' 'This file is optional.'), cfg.IntOpt('continuous_audit_interval', default=10, mutable=True, help='Interval (in seconds) for checking newly created ' 'continuous audits.')] def register_opts(conf): conf.register_group(watcher_decision_engine) conf.register_opts(WATCHER_DECISION_ENGINE_OPTS, group=watcher_decision_engine) def list_opts(): return [(watcher_decision_engine, WATCHER_DECISION_ENGINE_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/exception.py0000664000175000017500000000173500000000000021615 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg EXC_LOG_OPTS = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Make exception message format errors fatal.'), ] def register_opts(conf): conf.register_opts(EXC_LOG_OPTS) def list_opts(): return [('DEFAULT', EXC_LOG_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/glance_client.py0000664000175000017500000000307100000000000022401 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg glance_client = cfg.OptGroup(name='glance_client', title='Configuration Options for Glance') GLANCE_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2', help='Version of Glance API to use in glanceclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in glanceclient. ' 'Supported values: internalURL, publicURL, adminURL. 
' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(glance_client) conf.register_opts(GLANCE_CLIENT_OPTS, group=glance_client) def list_opts(): return [(glance_client, GLANCE_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/gnocchi_client.py0000664000175000017500000000305500000000000022564 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg gnocchi_client = cfg.OptGroup(name='gnocchi_client', title='Configuration Options for Gnocchi') GNOCCHI_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='1', help='Version of Gnocchi API to use in gnocchiclient.'), cfg.StrOpt('endpoint_type', default='public', help='Type of endpoint to use in gnocchi client. ' 'Supported values: internal, public, admin. ' 'The default is public.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.') ] def register_opts(conf): conf.register_group(gnocchi_client) conf.register_opts(GNOCCHI_CLIENT_OPTS, group=gnocchi_client) def list_opts(): return [(gnocchi_client, GNOCCHI_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/grafana_client.py0000664000175000017500000001537400000000000022560 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg grafana_client = cfg.OptGroup(name='grafana_client', title='Configuration Options for Grafana', help="See https://docs.openstack.org/watcher/lat" "est/datasources/grafana.html for details " "on how these options are used.") GRAFANA_CLIENT_OPTS = [ # TODO(Dantali0n) each individual metric could have its own token. # A similar structure to the database_map would solve this. cfg.StrOpt('token', default=None, help="Authentication token to gain access"), # TODO(Dantali0n) each individual metric could have its own base url. # A similar structure to the database_map would solve this. 
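    # NOTE: a minimal, hypothetical sketch of how the options in this group
    # combine for a single metric; every value below is made up for
    # illustration and must be adapted to a real Grafana deployment (see the
    # documentation URL referenced in the group help above):
    #
    #   [grafana_client]
    #   token = <grafana_api_token>
    #   base_url = https://grafana.example.org/api/datasources/proxy/
    #   project_id_map = host_cpu_usage:7465
    #   database_map = host_cpu_usage:influx_production
    #   attribute_map = host_cpu_usage:hostname
    #   translator_map = host_cpu_usage:influxdb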
cfg.StrOpt('base_url', default=None, help="First part of the url (including https:// or http://) up " "until project id part. " "Example: https://secure.org/api/datasource/proxy/"), cfg.DictOpt('project_id_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana project ids. " "Dictionary values should be positive integers. " "Example: 7465"), cfg.DictOpt('database_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana databases. " "Values should be strings. Example: influx_production"), cfg.DictOpt('attribute_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to resource attributes. " "For a complete list of available attributes see " "https://docs.openstack.org/watcher/latest/datasources/gr" "afana.html#attribute " "Values should be strings. Example: hostname"), cfg.DictOpt('translator_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana translators. " "Values should be strings. Example: influxdb"), cfg.DictOpt('query_map', # {0} = aggregate # {1} = attribute # {2} = period # {3} = granularity # {4} = { influxdb: retention_period, } default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana queries. " "Values should be strings for which the .format method " "will transform it. The transformation offers five " "parameters to the query labeled {0} to {4}. {0} will be " "replaced with the aggregate, {1} with the resource " "attribute, {2} with the period, {3} with the " "granularity and {4} with translator specifics for " "InfluxDB this will be the retention period. " "These queries will need to be constructed using tools " "such as Postman. Example: SELECT cpu FROM {4}." 
"cpu_percent WHERE host == '{1}' AND time > now()-{2}s"), cfg.IntOpt('http_timeout', min=0, default=60, mutable=True, help='Timeout for Grafana request') ] def register_opts(conf): conf.register_group(grafana_client) conf.register_opts(GRAFANA_CLIENT_OPTS, group=grafana_client) def list_opts(): return [(grafana_client, GRAFANA_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/grafana_translators.py0000664000175000017500000000316300000000000023647 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg grafana_translators = cfg.OptGroup(name='grafana_translators', title='Configuration Options for Grafana ' 'transalators') GRAFANA_TRANSLATOR_INFLUX_OPTS = [ cfg.DictOpt('retention_periods', default={ 'one_week': 604800, 'one_month': 2592000, 'five_years': 31556952 }, help="Keys are the names of retention periods in InfluxDB and " "the values should correspond with the maximum time they " "can retain in seconds. Example: {'one_day': 86400}")] def register_opts(conf): conf.register_group(grafana_translators) conf.register_opts(GRAFANA_TRANSLATOR_INFLUX_OPTS, group=grafana_translators) def list_opts(): return [(grafana_translators, GRAFANA_TRANSLATOR_INFLUX_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/ironic_client.py0000664000175000017500000000306500000000000022436 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg ironic_client = cfg.OptGroup(name='ironic_client', title='Configuration Options for Ironic') IRONIC_CLIENT_OPTS = [ cfg.StrOpt('api_version', default=1, help='Version of Ironic API to use in ironicclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in ironicclient. ' 'Supported values: internalURL, publicURL, adminURL. 
' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(ironic_client) conf.register_opts(IRONIC_CLIENT_OPTS, group=ironic_client) def list_opts(): return [(ironic_client, IRONIC_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/keystone_client.py0000664000175000017500000000250500000000000023012 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg keystone_client = cfg.OptGroup(name='keystone_client', title='Configuration Options for Keystone') KEYSTONE_CLIENT_OPTS = [ cfg.StrOpt('interface', default='admin', choices=['internal', 'public', 'admin'], help='Type of endpoint to use in keystoneclient.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(keystone_client) conf.register_opts(KEYSTONE_CLIENT_OPTS, group=keystone_client) def list_opts(): return [(keystone_client, KEYSTONE_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/maas_client.py0000664000175000017500000000237500000000000022077 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
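# NOTE: a minimal, hypothetical configuration for the MAAS integration;
# all values below are illustrative only. MAAS API keys are conventionally
# the colon-separated '<consumer_key>:<token_key>:<token_secret>' triple
# obtained from the MAAS UI or CLI:
#
#   [maas_client]
#   url = http://10.0.0.5:5240/MAAS
#   api_key = <consumer_key>:<token_key>:<token_secret>
#   timeout = 60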
from oslo_config import cfg maas_client = cfg.OptGroup(name='maas_client', title='Configuration Options for MaaS') MAAS_CLIENT_OPTS = [ cfg.StrOpt('url', help='MaaS URL, example: http://1.2.3.4:5240/MAAS'), cfg.StrOpt('api_key', help='MaaS API authentication key.'), cfg.IntOpt('timeout', default=60, help='MaaS client operation timeout in seconds.')] def register_opts(conf): conf.register_group(maas_client) conf.register_opts(MAAS_CLIENT_OPTS, group=maas_client) def list_opts(): return [(maas_client, MAAS_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/monasca_client.py0000664000175000017500000000307400000000000022574 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg monasca_client = cfg.OptGroup(name='monasca_client', title='Configuration Options for Monasca') MONASCA_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2_0', help='Version of Monasca API to use in monascaclient.'), cfg.StrOpt('interface', default='internal', help='Type of interface used for monasca endpoint. ' 'Supported values: internal, public, admin. ' 'The default is internal.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(monasca_client) conf.register_opts(MONASCA_CLIENT_OPTS, group=monasca_client) def list_opts(): return [(monasca_client, MONASCA_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/neutron_client.py0000664000175000017500000000311000000000000022634 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg neutron_client = cfg.OptGroup(name='neutron_client', title='Configuration Options for Neutron') NEUTRON_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2.0', help='Version of Neutron API to use in neutronclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in neutronclient. ' 'Supported values: internalURL, publicURL, adminURL. 
' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(neutron_client) conf.register_opts(NEUTRON_CLIENT_OPTS, group=neutron_client) def list_opts(): return [(neutron_client, NEUTRON_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/nova_client.py0000664000175000017500000000360300000000000022114 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.common import clients nova_client = cfg.OptGroup(name='nova_client', title='Configuration Options for Nova') NOVA_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2.56', help=""" Version of Nova API to use in novaclient. Minimum required version: %s Certain Watcher features depend on a minimum version of the compute API being available which is enforced with this option. See https://docs.openstack.org/nova/latest/reference/api-microversion-history.html for the compute API microversion history. """ % clients.MIN_NOVA_API_VERSION), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in novaclient. ' 'Supported values: internalURL, publicURL, adminURL. ' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(nova_client) conf.register_opts(NOVA_CLIENT_OPTS, group=nova_client) def list_opts(): return [(nova_client, NOVA_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/opts.py0000664000175000017500000000476300000000000020610 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is the single point of entry to generate the sample configuration file for Watcher. It collects all the necessary info from the other modules in this package. 
It is assumed that: * every other module in this package has a 'list_opts' function which returns a dict where * the keys are strings which are the group names * the value of each key is a list of config options for that group * the watcher.conf package doesn't have further packages with config options * this module is only used in the context of sample file generation """ import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = "list_opts" def list_opts(): """Grouped list of all the Watcher-specific configuration options :return: A list of ``(group, [opt_1, opt_2])`` tuple pairs, where ``group`` is either a group name as a string or an OptGroup object. """ opts = list() module_names = _list_module_names() imported_modules = _import_modules(module_names) for mod in imported_modules: opts.extend(mod.list_opts()) return opts def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for __, modname, ispkg in pkgutil.iter_modules(path=[package_path]): if modname == "opts" or ispkg: continue else: module_names.append(modname) return module_names def _import_modules(module_names): imported_modules = [] for modname in module_names: mod = importlib.import_module("watcher.conf." + modname) if not hasattr(mod, LIST_OPTS_FUNC_NAME): msg = "The module 'watcher.conf.%s' should have a '%s' "\ "function which returns the config options." % \ (modname, LIST_OPTS_FUNC_NAME) raise Exception(msg) else: imported_modules.append(mod) return imported_modules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/paths.py0000664000175000017500000000335600000000000020737 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
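# NOTE: the helpers below intentionally return *uninterpolated* paths such
# as '$state_path/watcher.sqlite'; oslo.config substitutes $pybasedir,
# $bindir and $state_path when the option value embedding them is read.
# A small sketch, assuming the defaults defined below:
#
#   state_path_def('watcher.sqlite')  # -> '$state_path/watcher.sqlite'
#
# watcher/conf/db.py embeds exactly this value in its default SQL
# connection URL, and oslo.config expands it at access time because
# state_path defaults to '$pybasedir'.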
from oslo_config import cfg import os PATH_OPTS = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), help='Directory where the watcher python module is installed.'), cfg.StrOpt('bindir', default='$pybasedir/bin', help='Directory where watcher binaries are installed.'), cfg.StrOpt('state_path', default='$pybasedir', help="Top-level directory for maintaining watcher's state."), ] def basedir_def(*args): """Return an uninterpolated path relative to $pybasedir.""" return os.path.join('$pybasedir', *args) def bindir_def(*args): """Return an uninterpolated path relative to $bindir.""" return os.path.join('$bindir', *args) def state_path_def(*args): """Return an uninterpolated path relative to $state_path.""" return os.path.join('$state_path', *args) def register_opts(conf): conf.register_opts(PATH_OPTS) def list_opts(): return [('DEFAULT', PATH_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/placement_client.py0000664000175000017500000000275100000000000023124 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg placement_group = cfg.OptGroup( 'placement_client', title='Placement Service Options', help="Configuration options for connecting to the placement API service") placement_opts = [ cfg.StrOpt('api_version', default='1.29', help='microversion of placement API when using ' 'placement service.'), cfg.StrOpt('interface', default='public', choices=['internal', 'public', 'admin'], help='Type of endpoint when using placement service.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(placement_group) conf.register_opts(placement_opts, group=placement_group) def list_opts(): return [(placement_group, placement_opts)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/planner.py0000664000175000017500000000243100000000000021250 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
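# NOTE: a brief, hypothetical example of selecting a planner; 'weight' is
# the in-tree default defined below, and valid names are the planner
# plugins discoverable by watcher's DefaultPlannerLoader (see
# watcher/conf/plugins.py):
#
#   [watcher_planner]
#   planner = weight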
from oslo_config import cfg watcher_planner = cfg.OptGroup(name='watcher_planner', title='Defines the parameters of ' 'the planner') default_planner = 'weight' WATCHER_PLANNER_OPTS = [ cfg.StrOpt('planner', default=default_planner, required=True, help='The selected planner used to schedule the actions') ] def register_opts(conf): conf.register_group(watcher_planner) conf.register_opts(WATCHER_PLANNER_OPTS, group=watcher_planner) def list_opts(): return [(watcher_planner, WATCHER_PLANNER_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/plugins.py0000664000175000017500000000457200000000000021302 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import prettytable as ptable from watcher.applier.loading import default as applier_loader from watcher.common import utils from watcher.decision_engine.loading import default as decision_engine_loader PLUGIN_LOADERS = ( applier_loader.DefaultActionLoader, decision_engine_loader.DefaultPlannerLoader, decision_engine_loader.DefaultScoringLoader, decision_engine_loader.DefaultScoringContainerLoader, decision_engine_loader.DefaultStrategyLoader, decision_engine_loader.ClusterDataModelCollectorLoader, applier_loader.DefaultWorkFlowEngineLoader, ) def list_opts(): """Load config options for all Watcher plugins""" plugins_opts = [] for plugin_loader_cls in PLUGIN_LOADERS: plugin_loader = plugin_loader_cls() plugins_map = plugin_loader.list_available() for plugin_name, plugin_cls in plugins_map.items(): plugin_opts = plugin_cls.get_config_opts() if plugin_opts: plugins_opts.append( (plugin_loader.get_entry_name(plugin_name), plugin_opts)) return plugins_opts def _show_plugins_ascii_table(rows): headers = ["Namespace", "Plugin name", "Import path"] table = ptable.PrettyTable(field_names=headers) for row in rows: table.add_row(row) return table.get_string() def show_plugins(): rows = [] for plugin_loader_cls in PLUGIN_LOADERS: plugin_loader = plugin_loader_cls() plugins_map = plugin_loader.list_available() rows += [ (plugin_loader.get_entry_name(plugin_name), plugin_name, utils.get_cls_import_path(plugin_cls)) for plugin_name, plugin_cls in plugins_map.items()] return _show_plugins_ascii_table(rows) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/prometheus_client.py0000664000175000017500000000513100000000000023342 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg prometheus_client = cfg.OptGroup(name='prometheus_client', title='Configuration Options for Prometheus', help="See https://docs.openstack.org/watcher/" "latest/datasources/prometheus.html for " "details on how these options are used.") PROMETHEUS_CLIENT_OPTS = [ cfg.StrOpt('host', help="The hostname or IP address for the prometheus server."), cfg.StrOpt('port', help="The port number used by the prometheus server."), cfg.StrOpt('fqdn_label', default="fqdn", help="The label that Prometheus uses to store the fqdn of " "exporters. Defaults to 'fqdn'."), cfg.StrOpt('instance_uuid_label', default="resource", help="The label that Prometheus uses to store the uuid of " "OpenStack instances. Defaults to 'resource'."), cfg.StrOpt('username', help="The basic_auth username to use to authenticate with the " "Prometheus server."), cfg.StrOpt('password', secret=True, help="The basic_auth password to use to authenticate with the " "Prometheus server."), cfg.StrOpt('cafile', help="Path to the CA certificate for establishing a TLS " "connection with the Prometheus server."), cfg.StrOpt('certfile', help="Path to the client certificate for establishing a TLS " "connection with the Prometheus server."), cfg.StrOpt('keyfile', help="Path to the client key for establishing a TLS " "connection with the Prometheus server."), ] def register_opts(conf): conf.register_group(prometheus_client) conf.register_opts(PROMETHEUS_CLIENT_OPTS, group=prometheus_client) def list_opts(): return [(prometheus_client, PROMETHEUS_CLIENT_OPTS)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/conf/service.py0000664000175000017500000000307100000000000021252 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import socket from oslo_config import cfg from watcher._i18n import _ SERVICE_OPTS = [ cfg.IntOpt('periodic_interval', default=60, mutable=True, help=_('Seconds between running periodic tasks.')), cfg.HostAddressOpt('host', default=socket.gethostname(), help=_('Name of this node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address. 
However, the node name ' 'must be valid within an AMQP key.') ), cfg.IntOpt('service_down_time', default=90, help=_('Maximum time since last check-in for up service.')) ] def register_opts(conf): conf.register_opts(SERVICE_OPTS) def list_opts(): return [ ('DEFAULT', SERVICE_OPTS), ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/db/0000775000175000017500000000000000000000000016677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/__init__.py0000664000175000017500000000000000000000000020776 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/api.py0000664000175000017500000007704400000000000020036 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base classes for storage engines """ import abc from oslo_config import cfg from oslo_db import api as db_api _BACKEND_MAPPING = {'sqlalchemy': 'watcher.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def get_instance(): """Return a DB API instance.""" return IMPL class BaseConnection(object, metaclass=abc.ABCMeta): """Base class for storage system connections.""" @abc.abstractmethod def get_goal_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching goals. Return a list of the specified columns for all goals that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of goals to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_goal(self, values): """Create a new goal. :param values: A dict containing several items used to identify and track the goal. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'DUMMY', 'display_name': 'Dummy', } :returns: A goal :raises: :py:class:`~.GoalAlreadyExists` """ @abc.abstractmethod def get_goal_by_id(self, context, goal_id, eager=False): """Return a goal given its ID. :param context: The security context :param goal_id: The ID of a goal :param eager: If True, also loads One-to-X data (Default: False) :returns: A goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def get_goal_by_uuid(self, context, goal_uuid, eager=False): """Return a goal given its UUID. 
:param context: The security context :param goal_uuid: The UUID of a goal :param eager: If True, also loads One-to-X data (Default: False) :returns: A goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def get_goal_by_name(self, context, goal_name, eager=False): """Return a goal given its name. :param context: The security context :param goal_name: The name of a goal :param eager: If True, also loads One-to-X data (Default: False) :returns: A goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def destroy_goal(self, goal_uuid): """Destroy a goal. :param goal_uuid: The UUID of a goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def update_goal(self, goal_uuid, values): """Update properties of a goal. :param goal_uuid: The UUID of a goal :param values: A dict containing several items used to identify and track the goal. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'DUMMY', 'display_name': 'Dummy', } :returns: A goal :raises: :py:class:`~.GoalNotFound` :raises: :py:class:`~.Invalid` """ def soft_delete_goal(self, goal_id): """Soft delete a goal. :param goal_id: The id or uuid of a goal. :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def get_strategy_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=True): """Get specific columns for matching strategies. Return a list of the specified columns for all strategies that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of strategies to return. :param marker: The last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: Direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_strategy(self, values): """Create a new strategy. :param values: A dict containing items used to identify and track the strategy. For example: :: { 'id': 1, 'uuid': utils.generate_uuid(), 'name': 'my_strategy', 'display_name': 'My strategy', 'goal_uuid': utils.generate_uuid(), } :returns: A strategy :raises: :py:class:`~.StrategyAlreadyExists` """ @abc.abstractmethod def get_strategy_by_id(self, context, strategy_id, eager=False): """Return a strategy given its ID. :param context: The security context :param strategy_id: The ID of a strategy :param eager: If True, also loads One-to-X data (Default: False) :returns: A strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): """Return a strategy given its UUID. :param context: The security context :param strategy_uuid: The UUID of a strategy :param eager: If True, also loads One-to-X data (Default: False) :returns: A strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def get_strategy_by_name(self, context, strategy_name, eager=False): """Return a strategy given its name. :param context: The security context :param strategy_name: The name of a strategy :param eager: If True, also loads One-to-X data (Default: False) :returns: A strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def destroy_strategy(self, strategy_uuid): """Destroy a strategy. 
:param strategy_uuid: The UUID of a strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def update_strategy(self, strategy_uuid, values): """Update properties of a strategy. :param strategy_uuid: The UUID of a strategy :returns: A strategy :raises: :py:class:`~.StrategyNotFound` :raises: :py:class:`~.Invalid` """ def soft_delete_strategy(self, strategy_id): """Soft delete a strategy. :param strategy_id: The id or uuid of a strategy. :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def get_audit_template_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching audit templates. Return a list of the specified columns for all audit templates that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of audit templates to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_audit_template(self, values): """Create a new audit template. :param values: A dict containing several items used to identify and track the audit template. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'example', 'description': 'free text description' 'goal': 'DUMMY' } :returns: An audit template. :raises: :py:class:`~.AuditTemplateAlreadyExists` """ @abc.abstractmethod def get_audit_template_by_id(self, context, audit_template_id, eager=False): """Return an audit template. :param context: The security context :param audit_template_id: The id of an audit template. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def get_audit_template_by_uuid(self, context, audit_template_uuid, eager=False): """Return an audit template. :param context: The security context :param audit_template_uuid: The uuid of an audit template. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ def get_audit_template_by_name(self, context, audit_template_name, eager=False): """Return an audit template. :param context: The security context :param audit_template_name: The name of an audit template. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def destroy_audit_template(self, audit_template_id): """Destroy an audit template. :param audit_template_id: The id or uuid of an audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def update_audit_template(self, audit_template_id, values): """Update properties of an audit template. :param audit_template_id: The id or uuid of an audit template. :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def soft_delete_audit_template(self, audit_template_id): """Soft delete an audit template. :param audit_template_id: The id or uuid of an audit template. 
:raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def get_audit_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching audits. Return a list of the specified columns for all audits that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of audits to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_audit(self, values): """Create a new audit. :param values: A dict containing several items used to identify and track the audit, and several dicts which are passed into the Drivers when managing this audit. For example: :: { 'uuid': utils.generate_uuid(), 'type': 'ONESHOT', } :returns: An audit. :raises: :py:class:`~.AuditAlreadyExists` """ @abc.abstractmethod def get_audit_by_id(self, context, audit_id, eager=False): """Return an audit. :param context: The security context :param audit_id: The id of an audit. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def get_audit_by_uuid(self, context, audit_uuid, eager=False): """Return an audit. :param context: The security context :param audit_uuid: The uuid of an audit. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit. :raises: :py:class:`~.AuditNotFound` """ def get_audit_by_name(self, context, audit_name, eager=False): """Return an audit. :param context: The security context :param audit_name: The name of an audit. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def destroy_audit(self, audit_id): """Destroy an audit and all associated action plans. :param audit_id: The id or uuid of an audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def update_audit(self, audit_id, values): """Update properties of an audit. :param audit_id: The id or uuid of an audit. :returns: An audit. :raises: :py:class:`~.AuditNotFound` :raises: :py:class:`~.Invalid` """ def soft_delete_audit(self, audit_id): """Soft delete an audit and all associated action plans. :param audit_id: The id or uuid of an audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def get_action_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching actions. Return a list of the specified columns for all actions that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of actions to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_action(self, values): """Create a new action. 
:param values: A dict containing several items used to identify and track the action, and several dicts which are passed into the Drivers when managing this action. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'example', 'description': 'free text description', 'aggregate': 'nova aggregate name or uuid' } :returns: An action. :raises: :py:class:`~.ActionAlreadyExists` """ @abc.abstractmethod def get_action_by_id(self, context, action_id, eager=False): """Return an action. :param context: The security context :param action_id: The id of an action. :param eager: If True, also loads One-to-X data (Default: False) :returns: An action. :raises: :py:class:`~.ActionNotFound` """ @abc.abstractmethod def get_action_by_uuid(self, context, action_uuid, eager=False): """Return an action. :param context: The security context :param action_uuid: The uuid of an action. :param eager: If True, also loads One-to-X data (Default: False) :returns: An action. :raises: :py:class:`~.ActionNotFound` """ @abc.abstractmethod def destroy_action(self, action_id): """Destroy an action and all associated interfaces. :param action_id: The id or uuid of an action. :raises: :py:class:`~.ActionNotFound` :raises: :py:class:`~.ActionReferenced` """ @abc.abstractmethod def update_action(self, action_id, values): """Update properties of an action. :param action_id: The id or uuid of an action. :returns: An action. :raises: :py:class:`~.ActionNotFound` :raises: :py:class:`~.ActionReferenced` :raises: :py:class:`~.Invalid` """ def soft_delete_action(self, action_id): """Soft delete an action. :param action_id: The id or uuid of an action. :raises: :py:class:`~.ActionNotFound` """ @abc.abstractmethod def get_action_plan_list( self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching action plans. Return a list of the specified columns for all action plans that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of action plans to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_action_plan(self, values): """Create a new action plan. :param values: A dict containing several items used to identify and track the action plan. :returns: An action plan. :raises: :py:class:`~.ActionPlanAlreadyExists` """ @abc.abstractmethod def get_action_plan_by_id(self, context, action_plan_id, eager=False): """Return an action plan. :param context: The security context :param action_plan_id: The id of an action plan. :param eager: If True, also loads One-to-X data (Default: False) :returns: An action plan. :raises: :py:class:`~.ActionPlanNotFound` """ @abc.abstractmethod def get_action_plan_by_uuid(self, context, action_plan_uuid, eager=False): """Return an action plan. :param context: The security context :param action_plan_uuid: The uuid of an action plan. :param eager: If True, also loads One-to-X data (Default: False) :returns: An action plan. :raises: :py:class:`~.ActionPlanNotFound` """ @abc.abstractmethod def destroy_action_plan(self, action_plan_id): """Destroy an action plan and all associated interfaces. 
:param action_plan_id: The id or uuid of an action plan. :raises: :py:class:`~.ActionPlanNotFound` :raises: :py:class:`~.ActionPlanReferenced` """ @abc.abstractmethod def update_action_plan(self, action_plan_id, values): """Update properties of an action plan. :param action_plan_id: The id or uuid of an action plan. :returns: An action plan. :raises: :py:class:`~.ActionPlanNotFound` :raises: :py:class:`~.ActionPlanReferenced` :raises: :py:class:`~.Invalid` """ def soft_delete_action_plan(self, action_plan_id): """Soft delete an action plan. :param action_plan_id: The id or uuid of an action plan. :raises: :py:class:`~.ActionPlanNotFound` """ @abc.abstractmethod def get_efficacy_indicator_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching efficacy indicators. Return a list of the specified columns for all efficacy indicators that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of efficacy indicators to return. :param marker: The last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: Direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_efficacy_indicator(self, values): """Create a new efficacy indicator. :param values: A dict containing items used to identify and track the efficacy indicator. For example: :: { 'id': 1, 'uuid': utils.generate_uuid(), 'name': 'my_efficacy_indicator', 'display_name': 'My efficacy indicator', 'goal_uuid': utils.generate_uuid(), } :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorAlreadyExists` """ @abc.abstractmethod def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, eager=False): """Return an efficacy indicator given its ID. :param context: The security context :param efficacy_indicator_id: The ID of an efficacy indicator :param eager: If True, also loads One-to-X data (Default: False) :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, eager=False): """Return an efficacy indicator given its UUID. :param context: The security context :param efficacy_indicator_uuid: The UUID of an efficacy indicator :param eager: If True, also loads One-to-X data (Default: False) :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, eager=False): """Return an efficacy indicator given its name. :param context: The security context :param efficacy_indicator_name: The name of an efficacy indicator :param eager: If True, also loads One-to-X data (Default: False) :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def destroy_efficacy_indicator(self, efficacy_indicator_uuid): """Destroy an efficacy indicator. 
:param efficacy_indicator_uuid: The UUID of an efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def update_efficacy_indicator(self, efficacy_indicator_id, values): """Update properties of an efficacy indicator. :param efficacy_indicator_id: The ID of an efficacy indicator :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def get_scoring_engine_list( self, context, columns=None, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching scoring engines. Return a list of the specified columns for all scoring engines that match the specified filters. :param context: The security context :param columns: List of column names to return. Defaults to 'id' column when columns == None. :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of scoring engines to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_scoring_engine(self, values): """Create a new scoring engine. :param values: A dict containing several items used to identify and track the scoring engine. :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineAlreadyExists` """ @abc.abstractmethod def get_scoring_engine_by_id(self, context, scoring_engine_id, eager=False): """Return a scoring engine by its id. :param context: The security context :param scoring_engine_id: The id of a scoring engine. :param eager: If True, also loads One-to-X data (Default: False) :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, eager=False): """Return a scoring engine by its uuid. :param context: The security context :param scoring_engine_uuid: The uuid of a scoring engine. :param eager: If True, also loads One-to-X data (Default: False) :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def get_scoring_engine_by_name(self, context, scoring_engine_name, eager=False): """Return a scoring engine by its name. :param context: The security context :param scoring_engine_name: The name of a scoring engine. :param eager: If True, also loads One-to-X data (Default: False) :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def destroy_scoring_engine(self, scoring_engine_id): """Destroy a scoring engine. :param scoring_engine_id: The id of a scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def update_scoring_engine(self, scoring_engine_id, values): """Update properties of a scoring engine. :param scoring_engine_id: The id of a scoring engine. :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def get_service_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching services. Return a list of the specified columns for all services that match the specified filters. :param context: The security context :param filters: Filters to apply. 
Defaults to None. :param limit: Maximum number of services to return. :param marker: The last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: Direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_service(self, values): """Create a new service. :param values: A dict containing items used to identify and track the service. For example: :: { 'id': 1, 'name': 'watcher-api', 'status': 'ACTIVE', 'host': 'controller' } :returns: A service :raises: :py:class:`~.ServiceAlreadyExists` """ @abc.abstractmethod def get_service_by_id(self, context, service_id, eager=False): """Return a service given its ID. :param context: The security context :param service_id: The ID of a service :param eager: If True, also loads One-to-X data (Default: False) :returns: A service :raises: :py:class:`~.ServiceNotFound` """ @abc.abstractmethod def get_service_by_name(self, context, service_name, eager=False): """Return a service given its name. :param context: The security context :param service_name: The name of a service :param eager: If True, also loads One-to-X data (Default: False) :returns: A service :raises: :py:class:`~.ServiceNotFound` """ @abc.abstractmethod def destroy_service(self, service_id): """Destroy a service. :param service_id: The ID of a service :raises: :py:class:`~.ServiceNotFound` """ @abc.abstractmethod def update_service(self, service_id, values): """Update properties of a service. :param service_id: The ID of a service :returns: A service :raises: :py:class:`~.ServiceNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def soft_delete_service(self, service_id): """Soft delete a service. :param service_id: The id of a service. :returns: A service. :raises: :py:class:`~.ServiceNotFound` """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/migration.py0000664000175000017500000000314400000000000021244 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
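# --- Illustrative sketch (not part of the Watcher tree): exercising the
# --- BaseConnection contract defined above through the lazily loaded
# --- sqlalchemy backend. The payload mirrors the documented create_goal()
# --- example; attribute access on the result assumes the sqlalchemy models.
from watcher.common import context
from watcher.common import utils
from watcher.db import api as db_api

impl = db_api.get_instance()    # resolves 'watcher.db.sqlalchemy.api' lazily
ctx = context.make_context()
goal = impl.create_goal({
    'uuid': utils.generate_uuid(),
    'name': 'DUMMY',
    'display_name': 'Dummy',
})
# eager=True also loads the one-to-many side (e.g. related strategies);
# an unknown UUID raises GoalNotFound.
fetched = impl.get_goal_by_uuid(ctx, goal.uuid, eager=True)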
"""Database setup and migration commands.""" from oslo_config import cfg from stevedore import driver _IMPL = None def get_backend(): global _IMPL if not _IMPL: cfg.CONF.import_opt('backend', 'oslo_db.options', group='database') _IMPL = driver.DriverManager("watcher.database.migration_backend", cfg.CONF.database.backend).driver return _IMPL def upgrade(version=None): """Migrate the database to `version` or the most recent version.""" return get_backend().upgrade(version) def downgrade(version=None): return get_backend().downgrade(version) def version(): return get_backend().version() def stamp(version): return get_backend().stamp(version) def revision(message, autogenerate): return get_backend().revision(message, autogenerate) def create_schema(): return get_backend().create_schema() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/purge.py0000664000175000017500000004073200000000000020401 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections import datetime import itertools import sys from oslo_log import log from oslo_utils import strutils import prettytable as ptable from watcher._i18n import _ from watcher._i18n import lazy_translation_enabled from watcher.common import context from watcher.common import exception from watcher.common import utils from watcher import objects LOG = log.getLogger(__name__) class WatcherObjectsMap(object): """Wrapper to deal with watcher objects per type This wrapper object contains a list of watcher objects per type. Its main use is to simplify the merge of watcher objects by avoiding duplicates, but also for representing the relationships between these objects. 
""" # This is for generating the .pot translations keymap = collections.OrderedDict([ ("goals", _("Goals")), ("strategies", _("Strategies")), ("audit_templates", _("Audit Templates")), ("audits", _("Audits")), ("action_plans", _("Action Plans")), ("actions", _("Actions")), ]) def __init__(self): for attr_name in self.keys(): setattr(self, attr_name, []) def values(self): return (getattr(self, key) for key in self.keys()) @classmethod def keys(cls): return cls.keymap.keys() def __iter__(self): return itertools.chain(*self.values()) def __add__(self, other): new_map = self.__class__() # Merge the 2 items dicts into a new object (and avoid dupes) for attr_name, initials, others in zip(self.keys(), self.values(), other.values()): # Creates a copy merged = initials[:] initials_ids = [item.id for item in initials] non_dupes = [item for item in others if item.id not in initials_ids] merged += non_dupes setattr(new_map, attr_name, merged) return new_map def __str__(self): out = "" for key, vals in zip(self.keys(), self.values()): ids = [val.id for val in vals] out += "%(key)s: %(val)s" % (dict(key=key, val=ids)) out += "\n" return out def __len__(self): return sum(len(getattr(self, key)) for key in self.keys()) def get_count_table(self): headers = list(self.keymap.values()) headers.append(_("Total")) # We also add a total count translated_headers = [ h.translate() if lazy_translation_enabled() else h for h in headers ] counters = [len(cat_vals) for cat_vals in self.values()] + [len(self)] table = ptable.PrettyTable(field_names=translated_headers) table.add_row(counters) return table.get_string() class PurgeCommand(object): """Purges the DB by removing soft deleted entries The workflow for this purge is the following: # Find soft deleted objects which are expired # Find orphan objects # Find their related objects whether they are expired or not # Merge them together # If it does not exceed the limit, destroy them all """ ctx = context.make_context(show_deleted=True) def __init__(self, age_in_days=None, max_number=None, uuid=None, exclude_orphans=False, dry_run=None): self.age_in_days = age_in_days self.max_number = max_number self.uuid = uuid self.exclude_orphans = exclude_orphans self.dry_run = dry_run self._delete_up_to_max = None self._objects_map = WatcherObjectsMap() def get_expiry_date(self): if not self.age_in_days: return None today = datetime.datetime.today() expiry_date = today - datetime.timedelta(days=self.age_in_days) return expiry_date @classmethod def get_goal_uuid(cls, uuid_or_name): if uuid_or_name is None: return query_func = None if not utils.is_uuid_like(uuid_or_name): query_func = objects.Goal.get_by_name else: query_func = objects.Goal.get_by_uuid try: goal = query_func(cls.ctx, uuid_or_name) except Exception as exc: LOG.exception(exc) raise exception.GoalNotFound(goal=uuid_or_name) if not goal.deleted_at: raise exception.NotSoftDeletedStateError( name=_('Goal'), id=uuid_or_name) return goal.uuid def _find_goals(self, filters=None): return objects.Goal.list(self.ctx, filters=filters) def _find_strategies(self, filters=None): return objects.Strategy.list(self.ctx, filters=filters) def _find_audit_templates(self, filters=None): return objects.AuditTemplate.list(self.ctx, filters=filters) def _find_audits(self, filters=None): return objects.Audit.list(self.ctx, filters=filters) def _find_action_plans(self, filters=None): return objects.ActionPlan.list(self.ctx, filters=filters) def _find_actions(self, filters=None): return objects.Action.list(self.ctx, filters=filters) def 
_find_orphans(self): orphans = WatcherObjectsMap() filters = dict(deleted=False) goals = objects.Goal.list(self.ctx, filters=filters) strategies = objects.Strategy.list(self.ctx, filters=filters) audit_templates = objects.AuditTemplate.list(self.ctx, filters=filters) audits = objects.Audit.list(self.ctx, filters=filters) action_plans = objects.ActionPlan.list(self.ctx, filters=filters) actions = objects.Action.list(self.ctx, filters=filters) goal_ids = set(g.id for g in goals) orphans.strategies = [ strategy for strategy in strategies if strategy.goal_id not in goal_ids] strategy_ids = [s.id for s in (s for s in strategies if s not in orphans.strategies)] orphans.audit_templates = [ audit_template for audit_template in audit_templates if audit_template.goal_id not in goal_ids or (audit_template.strategy_id and audit_template.strategy_id not in strategy_ids)] orphans.audits = [ audit for audit in audits if audit.goal_id not in goal_ids or (audit.strategy_id and audit.strategy_id not in strategy_ids)] # Objects with orphan parents are themselves orphans audit_ids = [audit.id for audit in audits if audit not in orphans.audits] orphans.action_plans = [ ap for ap in action_plans if ap.audit_id not in audit_ids or ap.strategy_id not in strategy_ids] # Objects with orphan parents are themselves orphans action_plan_ids = [ap.id for ap in action_plans if ap not in orphans.action_plans] orphans.actions = [ action for action in actions if action.action_plan_id not in action_plan_ids] LOG.debug("Orphans found:\n%s", orphans) LOG.info("Orphans found:\n%s", orphans.get_count_table()) return orphans def _find_soft_deleted_objects(self): to_be_deleted = WatcherObjectsMap() expiry_date = self.get_expiry_date() filters = dict(deleted=True) if self.uuid: filters["uuid"] = self.uuid if expiry_date: filters.update(dict(deleted_at__lt=expiry_date)) to_be_deleted.goals.extend(self._find_goals(filters)) to_be_deleted.strategies.extend(self._find_strategies(filters)) to_be_deleted.audit_templates.extend( self._find_audit_templates(filters)) to_be_deleted.audits.extend(self._find_audits(filters)) to_be_deleted.action_plans.extend( self._find_action_plans(filters)) to_be_deleted.actions.extend(self._find_actions(filters)) soft_deleted_objs = self._find_related_objects( to_be_deleted, base_filters=dict(deleted=True)) LOG.debug("Soft deleted objects:\n%s", soft_deleted_objs) return soft_deleted_objs def _find_related_objects(self, objects_map, base_filters=None): base_filters = base_filters or {} for goal in objects_map.goals: filters = {} filters.update(base_filters) filters.update(dict(goal_id=goal.id)) related_objs = WatcherObjectsMap() related_objs.strategies = self._find_strategies(filters) related_objs.audit_templates = self._find_audit_templates(filters) related_objs.audits = self._find_audits(filters) objects_map += related_objs for strategy in objects_map.strategies: filters = {} filters.update(base_filters) filters.update(dict(strategy_id=strategy.id)) related_objs = WatcherObjectsMap() related_objs.audit_templates = self._find_audit_templates(filters) related_objs.audits = self._find_audits(filters) objects_map += related_objs for audit in objects_map.audits: filters = {} filters.update(base_filters) filters.update(dict(audit_id=audit.id)) related_objs = WatcherObjectsMap() related_objs.action_plans = self._find_action_plans(filters) objects_map += related_objs for action_plan in objects_map.action_plans: filters = {} filters.update(base_filters) filters.update(dict(action_plan_id=action_plan.id)) 
related_objs = WatcherObjectsMap() related_objs.actions = self._find_actions(filters) objects_map += related_objs return objects_map def confirmation_prompt(self): print(self._objects_map.get_count_table()) raw_val = input( _("There are %(count)d objects set for deletion. " "Continue? [y/N]") % dict(count=len(self._objects_map))) return strutils.bool_from_string(raw_val) def delete_up_to_max_prompt(self, objects_map): print(objects_map.get_count_table()) print(_("The number of objects (%(num)s) to delete from the database " "exceeds the maximum number of objects (%(max_number)s) " "specified.") % dict(max_number=self.max_number, num=len(objects_map))) raw_val = input( _("Do you want to delete objects up to the specified maximum " "number? [y/N]")) self._delete_up_to_max = strutils.bool_from_string(raw_val) return self._delete_up_to_max def _aggregate_objects(self): """Objects aggregated on a 'per goal' basis""" # todo: aggregate orphans as well aggregate = [] for goal in self._objects_map.goals: related_objs = WatcherObjectsMap() # goals related_objs.goals = [goal] # strategies goal_ids = [goal.id] related_objs.strategies = [ strategy for strategy in self._objects_map.strategies if strategy.goal_id in goal_ids ] # audit templates strategy_ids = [ strategy.id for strategy in related_objs.strategies] related_objs.audit_templates = [ at for at in self._objects_map.audit_templates if at.goal_id in goal_ids or (at.strategy_id and at.strategy_id in strategy_ids) ] # audits related_objs.audits = [ audit for audit in self._objects_map.audits if audit.goal_id in goal_ids ] # action plans audit_ids = [audit.id for audit in related_objs.audits] related_objs.action_plans = [ action_plan for action_plan in self._objects_map.action_plans if action_plan.audit_id in audit_ids ] # actions action_plan_ids = [ action_plan.id for action_plan in related_objs.action_plans ] related_objs.actions = [ action for action in self._objects_map.actions if action.action_plan_id in action_plan_ids ] aggregate.append(related_objs) return aggregate def _get_objects_up_to_limit(self): aggregated_objects = self._aggregate_objects() to_be_deleted_subset = WatcherObjectsMap() for aggregate in aggregated_objects: if len(aggregate) + len(to_be_deleted_subset) <= self.max_number: to_be_deleted_subset += aggregate else: break LOG.debug(to_be_deleted_subset) return to_be_deleted_subset def find_objects_to_delete(self): """Finds all the objects to be purged :returns: A mapping with all the Watcher objects to purged :rtype: :py:class:`~.WatcherObjectsMap` instance """ to_be_deleted = self._find_soft_deleted_objects() if not self.exclude_orphans: to_be_deleted += self._find_orphans() LOG.debug("Objects to be deleted:\n%s", to_be_deleted) return to_be_deleted def do_delete(self): LOG.info("Deleting...") # Reversed to avoid errors with foreign keys for entry in reversed(list(self._objects_map)): entry.destroy() def execute(self): LOG.info("Starting purge command") self._objects_map = self.find_objects_to_delete() if (self.max_number is not None and len(self._objects_map) > self.max_number): if self.delete_up_to_max_prompt(self._objects_map): self._objects_map = self._get_objects_up_to_limit() else: return _orphans_note = (_(" (orphans excluded)") if self.exclude_orphans else _(" (may include orphans)")) if not self.dry_run and self.confirmation_prompt(): self.do_delete() print(_("Purge results summary%s:") % _orphans_note) LOG.info("Purge results summary%s:", _orphans_note) else: LOG.debug(self._objects_map) print(_("Here below is a 
table containing the objects " "that can be purged%s:") % _orphans_note) LOG.info("\n%s", self._objects_map.get_count_table()) print(self._objects_map.get_count_table()) LOG.info("Purge process completed") def purge(age_in_days, max_number, goal, exclude_orphans, dry_run): """Removes soft deleted objects from the database :param age_in_days: Number of days since deletion (from today) to exclude from the purge. If None, everything will be purged. :type age_in_days: int :param max_number: Max number of objects expected to be deleted. Prevents the deletion if exceeded. No limit if set to None. :type max_number: int :param goal: UUID or name of the goal to purge. :type goal: str :param exclude_orphans: Flag to indicate whether or not you want to exclude orphans from deletion (default: False). :type exclude_orphans: bool :param dry_run: Flag to indicate whether or not you want to perform a dry run (no deletion). :type dry_run: bool """ try: if max_number and max_number < 0: raise exception.NegativeLimitError LOG.info("[options] age_in_days = %s", age_in_days) LOG.info("[options] max_number = %s", max_number) LOG.info("[options] goal = %s", goal) LOG.info("[options] exclude_orphans = %s", exclude_orphans) LOG.info("[options] dry_run = %s", dry_run) uuid = PurgeCommand.get_goal_uuid(goal) cmd = PurgeCommand(age_in_days, max_number, uuid, exclude_orphans, dry_run) cmd.execute() except Exception as exc: LOG.exception(exc) print(exc) sys.exit(1) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/db/sqlalchemy/0000775000175000017500000000000000000000000021041 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000023140 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/0000775000175000017500000000000000000000000022435 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/README.rst0000664000175000017500000000362600000000000024133 0ustar00zuulzuul00000000000000The migrations in the alembic/versions contain the changes needed to migrate from older Watcher releases to newer versions. A migration occurs by executing a script that details the changes needed to upgrade/downgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially to update the database. The scripts are executed by Watcher's migration wrapper which uses the Alembic library to manage the migration. Watcher supports migration from Ocata or later. 
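``watcher-db-manage`` can also remove soft-deleted and orphaned rows through
its ``purge`` sub-command (implemented by ``watcher/db/purge.py`` above). The
option spelling below is an assumption taken from the ``purge()`` helper's
parameters -- check ``watcher-db-manage purge --help`` for the exact flags. A
dry run that only reports what would be removed::

 $ watcher-db-manage --config-file /path/to/watcher.conf purge --dry-run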
If you are a deployer or developer and want to migrate from Ocata to a later release, you must first add version tracking to the database:: $ watcher-db-manage --config-file /path/to/watcher.conf stamp ocata You can upgrade to the latest database version via:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head To check the current database version:: $ watcher-db-manage --config-file /path/to/watcher.conf version To create a script to run the migration offline:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head --sql To run the offline migration between specific migration versions:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade \ <start version>:<end version> --sql Upgrade the database incrementally:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade --revision \ <# of revs> Downgrade the database by a certain number of revisions:: $ watcher-db-manage --config-file /path/to/watcher.conf downgrade --revision \ <# of revs> Create a new revision:: $ watcher-db-manage --config-file /path/to/watcher.conf revision \ -m "description of revision" --autogenerate Create a blank file:: $ watcher-db-manage --config-file /path/to/watcher.conf revision \ -m "description of revision" Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/env.py0000664000175000017500000000335600000000000023606 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
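# --- For reference (assumption: standard Alembic API, not Watcher code): the
# --- stock Alembic template pairs the online runner defined below with an
# --- offline variant that renders SQL instead of executing it, selected via
# --- context.is_offline_mode():
#
#     def run_migrations_offline():
#         url = config.get_main_option("sqlalchemy.url")
#         context.configure(url=url, target_metadata=target_metadata,
#                           literal_binds=True)
#         with context.begin_transaction():
#             context.run_migrations()
#
# --- Watcher's env.py implements only the online path.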
""" engine = enginefacade.writer.get_engine() with engine.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/script.py.mako0000664000175000017500000000063400000000000025244 0ustar00zuulzuul00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/0000775000175000017500000000000000000000000024305 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/001_ocata.py0000664000175000017500000002243400000000000026333 0ustar00zuulzuul00000000000000"""ocata release Revision ID: 9894235b4278 Revises: None Create Date: 2017-02-01 09:40:05.065981 """ from alembic import op import oslo_db import sqlalchemy as sa from watcher.db.sqlalchemy import models # revision identifiers, used by Alembic. revision = '001' down_revision = None def upgrade(): op.create_table( 'goals', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=False), sa.Column('display_name', sa.String(length=63), nullable=False), sa.Column('efficacy_specification', models.JSONEncodedList(), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_goals0name'), sa.UniqueConstraint('uuid', name='uniq_goals0uuid') ) op.create_table( 'scoring_engines', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=63), nullable=False), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('metainfo', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_scoring_engines0name'), sa.UniqueConstraint('uuid', name='uniq_scoring_engines0uuid') ) op.create_table( 'services', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), 
sa.Column('host', sa.String(length=255), nullable=False), sa.Column('last_seen_up', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('host', 'name', 'deleted', name='uniq_services0host0name0deleted') ) op.create_table( 'strategies', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=False), sa.Column('display_name', sa.String(length=63), nullable=False), sa.Column('goal_id', sa.Integer(), nullable=False), sa.Column('parameters_spec', models.JSONEncodedDict(), nullable=True), sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), sa.UniqueConstraint('uuid', name='uniq_strategies0uuid') ) op.create_table( 'audit_templates', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('goal_id', sa.Integer(), nullable=False), sa.Column('strategy_id', sa.Integer(), nullable=True), sa.Column('scope', models.JSONEncodedList(), nullable=True), sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_audit_templates0name'), sa.UniqueConstraint('uuid', name='uniq_audit_templates0uuid') ) op.create_table( 'audits', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('audit_type', sa.String(length=20), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('parameters', models.JSONEncodedDict(), nullable=True), sa.Column('interval', sa.Integer(), nullable=True), sa.Column('goal_id', sa.Integer(), nullable=False), sa.Column('strategy_id', sa.Integer(), nullable=True), sa.Column('scope', models.JSONEncodedList(), nullable=True), sa.Column('auto_trigger', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_audits0uuid') ) op.create_table( 'action_plans', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('audit_id', sa.Integer(), nullable=False), sa.Column('strategy_id', sa.Integer(), 
nullable=False), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('global_efficacy', models.JSONEncodedDict(), nullable=True), sa.ForeignKeyConstraint(['audit_id'], ['audits.id'], ), sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_action_plans0uuid') ) op.create_table( 'actions', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('action_plan_id', sa.Integer(), nullable=False), sa.Column('action_type', sa.String(length=255), nullable=False), sa.Column('input_parameters', models.JSONEncodedDict(), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('parents', models.JSONEncodedList(), nullable=True), sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_actions0uuid') ) op.create_table( 'efficacy_indicators', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('unit', sa.String(length=63), nullable=True), sa.Column('value', sa.Numeric(), nullable=True), sa.Column('action_plan_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid') ) def downgrade(): op.drop_table('efficacy_indicators') op.drop_table('actions') op.drop_table('action_plans') op.drop_table('audits') op.drop_table('audit_templates') op.drop_table('strategies') op.drop_table('services') op.drop_table('scoring_engines') op.drop_table('goals') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py0000664000175000017500000000217100000000000032404 0ustar00zuulzuul00000000000000"""Add apscheduler_jobs table to store background jobs Revision ID: 0f6042416884 Revises: 001 Create Date: 2017-03-24 11:21:29.036532 """ from alembic import op from sqlalchemy import inspect import sqlalchemy as sa from watcher.db.sqlalchemy import models # revision identifiers, used by Alembic. 
revision = '0f6042416884' down_revision = '001' def _table_exists(table_name): bind = op.get_context().bind insp = inspect(bind) names = insp.get_table_names() return any(t == table_name for t in names) def upgrade(): if _table_exists('apscheduler_jobs'): return op.create_table( 'apscheduler_jobs', sa.Column('id', sa.Unicode(191), nullable=False), sa.Column('next_run_time', sa.Float(25), index=True), sa.Column('job_state', sa.LargeBinary, nullable=False), sa.Column('service_id', sa.Integer(), nullable=False), sa.Column('tag', models.JSONEncodedDict(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint(['service_id'], ['services.id']) ) def downgrade(): op.drop_table('apscheduler_jobs') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/3cfc94cecf4e_add_name_for_audit.py0000664000175000017500000000064400000000000032574 0ustar00zuulzuul00000000000000"""add name for audit Revision ID: 3cfc94cecf4e Revises: d09a5945e4a0 Create Date: 2017-07-19 15:44:57.661099 """ # revision identifiers, used by Alembic. revision = '3cfc94cecf4e' down_revision = 'd09a5945e4a0' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('audits', sa.Column('name', sa.String(length=63), nullable=True)) def downgrade(): op.drop_column('audits', 'name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/4b16194c56bc_add_start_end_time.py0000664000175000017500000000104300000000000032303 0ustar00zuulzuul00000000000000"""add_start_end_time Revision ID: 4b16194c56bc Revises: 52804f2498c4 Create Date: 2018-03-23 00:36:29.031259 """ # revision identifiers, used by Alembic. revision = '4b16194c56bc' down_revision = '52804f2498c4' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('audits', sa.Column('start_time', sa.DateTime(), nullable=True)) op.add_column('audits', sa.Column('end_time', sa.DateTime(), nullable=True)) def downgrade(): op.drop_column('audits', 'start_time') op.drop_column('audits', 'end_time') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/52804f2498c4_add_hostname.py0000664000175000017500000000110700000000000030772 0ustar00zuulzuul00000000000000"""Add hostname field to both Audit and Action Plan models Revision ID: 52804f2498c4 Revises: a86240e89a29 Create Date: 2018-06-26 13:06:45.530387 """ # revision identifiers, used by Alembic. revision = '52804f2498c4' down_revision = 'a86240e89a29' from alembic import op import sqlalchemy as sa def upgrade(): for table in ('audits', 'action_plans'): op.add_column( table, sa.Column('hostname', sa.String(length=255), nullable=True)) def downgrade(): for table in ('audits', 'action_plans'): op.drop_column(table, 'hostname') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/609bec748f2a_add_force_field.py0000664000175000017500000000063100000000000031634 0ustar00zuulzuul00000000000000"""add_force_field Revision ID: 609bec748f2a Revises: 4b16194c56bc Create Date: 2019-05-05 14:06:14.249124 """ # revision identifiers, used by Alembic.
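# NOTE: for orientation, the down_revision links in this directory form
# a single linear chain, which Alembic follows regardless of file names:
#
#   001 -> 0f6042416884 -> d098df6021e2 -> d09a5945e4a0 -> 3cfc94cecf4e
#       -> a86240e89a29 -> 52804f2498c4 -> 4b16194c56bc -> 609bec748f2a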
revision = '609bec748f2a' down_revision = '4b16194c56bc' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('audits', sa.Column('force', sa.Boolean, default=False)) def downgrade(): op.drop_column('audits', 'force') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/a86240e89a29_.py0000664000175000017500000000247600000000000026515 0ustar00zuulzuul00000000000000"""Set name for Audit as part of backward compatibility Revision ID: a86240e89a29 Revises: 3cfc94cecf4e Create Date: 2017-12-21 13:00:09.278587 """ # revision identifiers, used by Alembic. revision = 'a86240e89a29' down_revision = '3cfc94cecf4e' from alembic import op from sqlalchemy.orm import sessionmaker from watcher.db.sqlalchemy import models def upgrade(): connection = op.get_bind() session = sessionmaker() s = session(bind=connection) audits = s.query( models.Audit.id.label('id'), models.Audit.strategy_id.label('strategy_id'), models.Audit.created_at.label('created_at')).filter( models.Audit.name.is_(None)).all() for audit in audits: strategy_name = s.query(models.Strategy).filter_by( id=audit.strategy_id).one().name s.query(models.Audit).filter( models.Audit.id == audit.id).update( {'name': strategy_name + '-' + str(audit.created_at)}, synchronize_session=False) s.commit() def downgrade(): connection = op.get_bind() session = sessionmaker() s = session(bind=connection) s.query(models.Audit).filter( models.Audit.name.isnot(None)).update( {'name': None}, synchronize_session=False) s.commit() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py0000664000175000017500000000124100000000000033264 0ustar00zuulzuul00000000000000"""Add cron support for audit table Revision ID: d098df6021e2 Revises: 0f6042416884 Create Date: 2017-06-08 16:21:35.746752 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.
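# NOTE: after this revision, audits.interval is a String(36) so it can
# hold either a number of seconds or a cron expression; e.g.
# (illustrative values only) interval='3600' or interval='*/10 * * * *'.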
revision = 'd09a5945e4a0' down_revision = 'd098df6021e2' def upgrade(): op.create_table( 'action_descriptions', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('action_type', sa.String(length=255), nullable=False), sa.Column('description', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('action_type', name='uniq_action_description0action_type') ) def downgrade(): op.drop_table('action_descriptions') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/alembic.ini0000664000175000017500000000171700000000000023144 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false #sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/api.py0000664000175000017500000012642500000000000022176 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""SQLAlchemy storage backend.""" import collections import datetime import operator import threading from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import timeutils from sqlalchemy.inspection import inspect from sqlalchemy.orm import exc from sqlalchemy.orm import joinedload from watcher._i18n import _ from watcher.common import exception from watcher.common import utils from watcher.db import api from watcher.db.sqlalchemy import models from watcher import objects CONF = cfg.CONF _CONTEXT = threading.local() def get_backend(): """The backend is this module itself.""" return Connection() def _session_for_read(): return enginefacade.reader.using(_CONTEXT) # NOTE(tylerchristie) Please add @oslo_db_api.retry_on_deadlock decorator to # any new methods using _session_for_write (as deadlocks happen on write), so # that oslo_db is able to retry in case of deadlocks. def _session_for_write(): return enginefacade.writer.using(_CONTEXT) def add_identity_filter(query, value): """Adds an identity filter to a query. Filters results by ID, if supplied value is a valid integer. Otherwise attempts to filter results by UUID. :param query: Initial query to add filter to. :param value: Value for filtering results by. :return: Modified query. """ if utils.is_int_like(value): return query.filter_by(id=value) elif utils.is_uuid_like(value): return query.filter_by(uuid=value) else: raise exception.InvalidIdentity(identity=value) def _paginate_query(model, limit=None, marker=None, sort_key=None, sort_dir=None, query=None): sort_keys = ['id'] if sort_key and sort_key not in sort_keys: sort_keys.insert(0, sort_key) query = db_utils.paginate_query(query, model, limit, sort_keys, marker=marker, sort_dir=sort_dir) return query.all() class JoinMap(utils.Struct): """Mapping for the Join-based queries""" NaturalJoinFilter = collections.namedtuple( 'NaturalJoinFilter', ['join_fieldname', 'join_model']) class Connection(api.BaseConnection): """SqlAlchemy connection.""" valid_operators = { "": operator.eq, "eq": operator.eq, "neq": operator.ne, "gt": operator.gt, "gte": operator.ge, "lt": operator.lt, "lte": operator.le, "in": lambda field, choices: field.in_(choices), "notin": lambda field, choices: field.notin_(choices), } def __init__(self): super(Connection, self).__init__() def __add_simple_filter(self, query, model, fieldname, value, operator_): field = getattr(model, fieldname) if (fieldname != 'deleted' and value and field.type.python_type is datetime.datetime): if not isinstance(value, datetime.datetime): value = timeutils.parse_isotime(value) return query.filter(self.valid_operators[operator_](field, value)) def __add_join_filter(self, query, model, fieldname, value, operator_): query = query.join(model) return self.__add_simple_filter(query, model, fieldname, value, operator_) def __decompose_filter(self, raw_fieldname): """Decompose a filter name into its 2 subparts A filter can take 2 forms: - "" which is a syntactic sugar for "__eq" - "__" where is the comparison operator to be used. 
Available operators are: - eq - neq - gt - gte - lt - lte - in - notin """ separator = '__' fieldname, separator, operator_ = raw_fieldname.partition(separator) if operator_ and operator_ not in self.valid_operators: raise exception.InvalidOperator( operator=operator_, valid_operators=self.valid_operators) return fieldname, operator_ def _add_filters(self, query, model, filters=None, plain_fields=None, join_fieldmap=None): """Generic way to add filters to a Watcher model Each filter key provided by the `filters` parameter will be decomposed into 2 pieces: the field name and the comparison operator - "": By default, the "eq" is applied if no operator is provided - "eq", which stands for "equal" : e.g. {"state__eq": "PENDING"} will result in the "WHERE state = 'PENDING'" clause. - "neq", which stands for "not equal" : e.g. {"state__neq": "PENDING"} will result in the "WHERE state != 'PENDING'" clause. - "gt", which stands for "greater than" : e.g. {"created_at__gt": "2016-06-06T10:33:22.063176"} will result in the "WHERE created_at > '2016-06-06T10:33:22.063176'" clause. - "gte", which stands for "greater than or equal to" : e.g. {"created_at__gte": "2016-06-06T10:33:22.063176"} will result in the "WHERE created_at >= '2016-06-06T10:33:22.063176'" clause. - "lt", which stands for "less than" : e.g. {"created_at__lt": "2016-06-06T10:33:22.063176"} will result in the "WHERE created_at < '2016-06-06T10:33:22.063176'" clause. - "lte", which stands for "less than or equal to" : e.g. {"created_at__lte": "2016-06-06T10:33:22.063176"} will result in the "WHERE created_at <= '2016-06-06T10:33:22.063176'" clause. - "in": e.g. {"state__in": ('SUCCEEDED', 'FAILED')} will result in the "WHERE state IN ('SUCCEEDED', 'FAILED')" clause. :param query: a :py:class:`sqlalchemy.orm.query.Query` instance :param model: the model class the filters should relate to :param filters: dict with the following structure {"fieldname": value} :param plain_fields: a :py:class:`sqlalchemy.orm.query.Query` instance :param join_fieldmap: a :py:class:`sqlalchemy.orm.query.Query` instance """ soft_delete_mixin_fields = ['deleted', 'deleted_at'] timestamp_mixin_fields = ['created_at', 'updated_at'] filters = filters or {} # Special case for 'deleted' because it is a non-boolean flag if 'deleted' in filters: deleted_filter = filters.pop('deleted') op = 'eq' if not bool(deleted_filter) else 'neq' filters['deleted__%s' % op] = 0 plain_fields = tuple( (list(plain_fields) or []) + soft_delete_mixin_fields + timestamp_mixin_fields) join_fieldmap = join_fieldmap or {} for raw_fieldname, value in filters.items(): fieldname, operator_ = self.__decompose_filter(raw_fieldname) if fieldname in plain_fields: query = self.__add_simple_filter( query, model, fieldname, value, operator_) elif fieldname in join_fieldmap: join_field, join_model = join_fieldmap[fieldname] query = self.__add_join_filter( query, join_model, join_field, value, operator_) return query @staticmethod def _get_relationships(model): return inspect(model).relationships @staticmethod def _set_eager_options(model, query): relationships = inspect(model).relationships for relationship in relationships: if not relationship.uselist: # We have a One-to-X relationship query = query.options(joinedload( getattr(model, relationship.key))) return query @oslo_db_api.retry_on_deadlock def _create(self, model, values): with _session_for_write() as session: obj = model() cleaned_values = {k: v for k, v in values.items() if k not in self._get_relationships(model)} obj.update(cleaned_values) 
session.add(obj) session.flush() return obj def _get(self, context, model, fieldname, value, eager): with _session_for_read() as session: query = session.query(model) if eager: query = self._set_eager_options(model, query) query = query.filter(getattr(model, fieldname) == value) if not context.show_deleted: query = query.filter(model.deleted_at.is_(None)) try: obj = query.one() except exc.NoResultFound: raise exception.ResourceNotFound(name=model.__name__, id=value) return obj @staticmethod @oslo_db_api.retry_on_deadlock def _update(model, id_, values): with _session_for_write() as session: query = session.query(model) query = add_identity_filter(query, id_) try: ref = query.with_for_update().one() except exc.NoResultFound: raise exception.ResourceNotFound(name=model.__name__, id=id_) ref.update(values) return ref @staticmethod @oslo_db_api.retry_on_deadlock def _soft_delete(model, id_): with _session_for_write() as session: query = session.query(model) query = add_identity_filter(query, id_) try: row = query.one() except exc.NoResultFound: raise exception.ResourceNotFound(name=model.__name__, id=id_) row.soft_delete(session) return row @staticmethod @oslo_db_api.retry_on_deadlock def _destroy(model, id_): with _session_for_write() as session: query = session.query(model) query = add_identity_filter(query, id_) try: query.one() except exc.NoResultFound: raise exception.ResourceNotFound(name=model.__name__, id=id_) query.delete() def _get_model_list(self, model, add_filters_func, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): with _session_for_read() as session: query = session.query(model) if eager: query = self._set_eager_options(model, query) query = add_filters_func(query, filters) if not context.show_deleted: query = query.filter(model.deleted_at.is_(None)) return _paginate_query(model, limit, marker, sort_key, sort_dir, query) # NOTE(erakli): _add_..._filters methods should be refactored to have same # content. 
join_fieldmap should be filled with JoinMap instead of dict def _add_goals_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['uuid', 'name', 'display_name'] return self._add_filters( query=query, model=models.Goal, filters=filters, plain_fields=plain_fields) def _add_strategies_filters(self, query, filters): plain_fields = ['uuid', 'name', 'display_name', 'goal_id'] join_fieldmap = JoinMap( goal_uuid=NaturalJoinFilter( join_fieldname="uuid", join_model=models.Goal), goal_name=NaturalJoinFilter( join_fieldname="name", join_model=models.Goal)) return self._add_filters( query=query, model=models.Strategy, filters=filters, plain_fields=plain_fields, join_fieldmap=join_fieldmap) def _add_audit_templates_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['uuid', 'name', 'goal_id', 'strategy_id'] join_fieldmap = JoinMap( goal_uuid=NaturalJoinFilter( join_fieldname="uuid", join_model=models.Goal), goal_name=NaturalJoinFilter( join_fieldname="name", join_model=models.Goal), strategy_uuid=NaturalJoinFilter( join_fieldname="uuid", join_model=models.Strategy), strategy_name=NaturalJoinFilter( join_fieldname="name", join_model=models.Strategy), ) return self._add_filters( query=query, model=models.AuditTemplate, filters=filters, plain_fields=plain_fields, join_fieldmap=join_fieldmap) def _add_audits_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['uuid', 'audit_type', 'state', 'goal_id', 'strategy_id', 'hostname'] join_fieldmap = { 'goal_uuid': ("uuid", models.Goal), 'goal_name': ("name", models.Goal), 'strategy_uuid': ("uuid", models.Strategy), 'strategy_name': ("name", models.Strategy), } return self._add_filters( query=query, model=models.Audit, filters=filters, plain_fields=plain_fields, join_fieldmap=join_fieldmap) def _add_action_plans_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['uuid', 'state', 'audit_id', 'strategy_id'] join_fieldmap = JoinMap( audit_uuid=NaturalJoinFilter( join_fieldname="uuid", join_model=models.Audit), strategy_uuid=NaturalJoinFilter( join_fieldname="uuid", join_model=models.Strategy), strategy_name=NaturalJoinFilter( join_fieldname="name", join_model=models.Strategy), ) return self._add_filters( query=query, model=models.ActionPlan, filters=filters, plain_fields=plain_fields, join_fieldmap=join_fieldmap) def _add_actions_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['uuid', 'state', 'action_plan_id'] join_fieldmap = { 'action_plan_uuid': ("uuid", models.ActionPlan), } query = self._add_filters( query=query, model=models.Action, filters=filters, plain_fields=plain_fields, join_fieldmap=join_fieldmap) if 'audit_uuid' in filters: with _session_for_read() as session: stmt = session.query(models.ActionPlan).join( models.Audit, models.Audit.id == models.ActionPlan.audit_id)\ .filter_by(uuid=filters['audit_uuid']).subquery() query = query.filter_by(action_plan_id=stmt.c.id) return query def _add_efficacy_indicators_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['uuid', 'name', 'unit', 'schema', 'action_plan_id'] join_fieldmap = JoinMap( action_plan_uuid=NaturalJoinFilter( join_fieldname="uuid", join_model=models.ActionPlan), ) return self._add_filters( query=query, model=models.EfficacyIndicator, filters=filters, plain_fields=plain_fields, join_fieldmap=join_fieldmap) def _add_scoring_engine_filters(self, query, filters): if filters is None: filters = {} plain_fields = ['id', 'description'] 
return self._add_filters( query=query, model=models.ScoringEngine, filters=filters, plain_fields=plain_fields) def _add_action_descriptions_filters(self, query, filters): if not filters: filters = {} plain_fields = ['id', 'action_type'] return self._add_filters( query=query, model=models.ActionDescription, filters=filters, plain_fields=plain_fields) def _add_services_filters(self, query, filters): if not filters: filters = {} plain_fields = ['id', 'name', 'host'] return self._add_filters( query=query, model=models.Service, filters=filters, plain_fields=plain_fields) # ### GOALS ### # def get_goal_list(self, *args, **kwargs): return self._get_model_list(models.Goal, self._add_goals_filters, *args, **kwargs) def create_goal(self, values): # ensure defaults are present for new goals if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: goal = self._create(models.Goal, values) except db_exc.DBDuplicateEntry: raise exception.GoalAlreadyExists(uuid=values['uuid']) return goal def _get_goal(self, context, fieldname, value, eager): try: return self._get(context, model=models.Goal, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.GoalNotFound(goal=value) def get_goal_by_id(self, context, goal_id, eager=False): return self._get_goal( context, fieldname="id", value=goal_id, eager=eager) def get_goal_by_uuid(self, context, goal_uuid, eager=False): return self._get_goal( context, fieldname="uuid", value=goal_uuid, eager=eager) def get_goal_by_name(self, context, goal_name, eager=False): return self._get_goal( context, fieldname="name", value=goal_name, eager=eager) def destroy_goal(self, goal_id): try: return self._destroy(models.Goal, goal_id) except exception.ResourceNotFound: raise exception.GoalNotFound(goal=goal_id) def update_goal(self, goal_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing Goal.")) try: return self._update(models.Goal, goal_id, values) except exception.ResourceNotFound: raise exception.GoalNotFound(goal=goal_id) def soft_delete_goal(self, goal_id): try: return self._soft_delete(models.Goal, goal_id) except exception.ResourceNotFound: raise exception.GoalNotFound(goal=goal_id) # ### STRATEGIES ### # def get_strategy_list(self, *args, **kwargs): return self._get_model_list(models.Strategy, self._add_strategies_filters, *args, **kwargs) def create_strategy(self, values): # ensure defaults are present for new strategies if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: strategy = self._create(models.Strategy, values) except db_exc.DBDuplicateEntry: raise exception.StrategyAlreadyExists(uuid=values['uuid']) return strategy def _get_strategy(self, context, fieldname, value, eager): try: return self._get(context, model=models.Strategy, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.StrategyNotFound(strategy=value) def get_strategy_by_id(self, context, strategy_id, eager=False): return self._get_strategy( context, fieldname="id", value=strategy_id, eager=eager) def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): return self._get_strategy( context, fieldname="uuid", value=strategy_uuid, eager=eager) def get_strategy_by_name(self, context, strategy_name, eager=False): return self._get_strategy( context, fieldname="name", value=strategy_name, eager=eager) def destroy_strategy(self, strategy_id): try: return self._destroy(models.Strategy, strategy_id) except 
exception.ResourceNotFound: raise exception.StrategyNotFound(strategy=strategy_id) def update_strategy(self, strategy_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing Strategy.")) try: return self._update(models.Strategy, strategy_id, values) except exception.ResourceNotFound: raise exception.StrategyNotFound(strategy=strategy_id) def soft_delete_strategy(self, strategy_id): try: return self._soft_delete(models.Strategy, strategy_id) except exception.ResourceNotFound: raise exception.StrategyNotFound(strategy=strategy_id) # ### AUDIT TEMPLATES ### # def get_audit_template_list(self, *args, **kwargs): return self._get_model_list(models.AuditTemplate, self._add_audit_templates_filters, *args, **kwargs) def create_audit_template(self, values): # ensure defaults are present for new audit_templates if not values.get('uuid'): values['uuid'] = utils.generate_uuid() with _session_for_write() as session: query = session.query(models.AuditTemplate) query = query.filter_by(name=values.get('name'), deleted_at=None) if len(query.all()) > 0: raise exception.AuditTemplateAlreadyExists( audit_template=values['name']) try: audit_template = self._create(models.AuditTemplate, values) except db_exc.DBDuplicateEntry: raise exception.AuditTemplateAlreadyExists( audit_template=values['name']) return audit_template def _get_audit_template(self, context, fieldname, value, eager): try: return self._get(context, model=models.AuditTemplate, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.AuditTemplateNotFound(audit_template=value) def get_audit_template_by_id(self, context, audit_template_id, eager=False): return self._get_audit_template( context, fieldname="id", value=audit_template_id, eager=eager) def get_audit_template_by_uuid(self, context, audit_template_uuid, eager=False): return self._get_audit_template( context, fieldname="uuid", value=audit_template_uuid, eager=eager) def get_audit_template_by_name(self, context, audit_template_name, eager=False): return self._get_audit_template( context, fieldname="name", value=audit_template_name, eager=eager) def destroy_audit_template(self, audit_template_id): try: return self._destroy(models.AuditTemplate, audit_template_id) except exception.ResourceNotFound: raise exception.AuditTemplateNotFound( audit_template=audit_template_id) def update_audit_template(self, audit_template_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "Audit Template.")) try: return self._update( models.AuditTemplate, audit_template_id, values) except exception.ResourceNotFound: raise exception.AuditTemplateNotFound( audit_template=audit_template_id) def soft_delete_audit_template(self, audit_template_id): try: return self._soft_delete(models.AuditTemplate, audit_template_id) except exception.ResourceNotFound: raise exception.AuditTemplateNotFound( audit_template=audit_template_id) # ### AUDITS ### # def get_audit_list(self, *args, **kwargs): return self._get_model_list(models.Audit, self._add_audits_filters, *args, **kwargs) def create_audit(self, values): # ensure defaults are present for new audits if not values.get('uuid'): values['uuid'] = utils.generate_uuid() with _session_for_write() as session: query = session.query(models.Audit) query = query.filter_by(name=values.get('name'), deleted_at=None) if len(query.all()) > 0: raise exception.AuditAlreadyExists( audit=values['name']) if values.get('state') is None: 
values['state'] = objects.audit.State.PENDING if not values.get('auto_trigger'): values['auto_trigger'] = False try: audit = self._create(models.Audit, values) except db_exc.DBDuplicateEntry: raise exception.AuditAlreadyExists(audit=values['uuid']) return audit def _get_audit(self, context, fieldname, value, eager): try: return self._get(context, model=models.Audit, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.AuditNotFound(audit=value) def get_audit_by_id(self, context, audit_id, eager=False): return self._get_audit( context, fieldname="id", value=audit_id, eager=eager) def get_audit_by_uuid(self, context, audit_uuid, eager=False): return self._get_audit( context, fieldname="uuid", value=audit_uuid, eager=eager) def get_audit_by_name(self, context, audit_name, eager=False): return self._get_audit( context, fieldname="name", value=audit_name, eager=eager) def destroy_audit(self, audit_id): def is_audit_referenced(session, audit_id): """Checks whether the audit is referenced by action_plan(s).""" query = session.query(models.ActionPlan) query = self._add_action_plans_filters( query, {'audit_id': audit_id}) return query.count() != 0 with _session_for_write() as session: query = session.query(models.Audit) query = add_identity_filter(query, audit_id) try: audit_ref = query.one() except exc.NoResultFound: raise exception.AuditNotFound(audit=audit_id) if is_audit_referenced(session, audit_ref['id']): raise exception.AuditReferenced(audit=audit_id) query.delete() def update_audit(self, audit_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "Audit.")) try: return self._update(models.Audit, audit_id, values) except exception.ResourceNotFound: raise exception.AuditNotFound(audit=audit_id) def soft_delete_audit(self, audit_id): try: return self._soft_delete(models.Audit, audit_id) except exception.ResourceNotFound: raise exception.AuditNotFound(audit=audit_id) # ### ACTIONS ### # def get_action_list(self, *args, **kwargs): return self._get_model_list(models.Action, self._add_actions_filters, *args, **kwargs) def create_action(self, values): # ensure defaults are present for new actions if not values.get('uuid'): values['uuid'] = utils.generate_uuid() if values.get('state') is None: values['state'] = objects.action.State.PENDING try: action = self._create(models.Action, values) except db_exc.DBDuplicateEntry: raise exception.ActionAlreadyExists(uuid=values['uuid']) return action def _get_action(self, context, fieldname, value, eager): try: return self._get(context, model=models.Action, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ActionNotFound(action=value) def get_action_by_id(self, context, action_id, eager=False): return self._get_action( context, fieldname="id", value=action_id, eager=eager) def get_action_by_uuid(self, context, action_uuid, eager=False): return self._get_action( context, fieldname="uuid", value=action_uuid, eager=eager) def destroy_action(self, action_id): with _session_for_write() as session: query = session.query(models.Action) query = add_identity_filter(query, action_id) count = query.delete() if count != 1: raise exception.ActionNotFound(action_id) def update_action(self, action_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing Action.")) return self._do_update_action(action_id, values) 
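# Usage sketch (hypothetical values) tying the filter grammar documented
# in _add_filters() to a call site:
#
#   conn = get_backend()
#   conn.get_audit_list(context,
#                       filters={'state__in': ('PENDING', 'ONGOING'),
#                                'goal_name': 'server_consolidation'},
#                       sort_key='id', sort_dir='asc', limit=10)
#
# where 'goal_name' is resolved through the join_fieldmap onto models.Goal.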
@staticmethod def _do_update_action(action_id, values): with _session_for_write() as session: query = session.query(models.Action) query = add_identity_filter(query, action_id) try: ref = query.with_for_update().one() except exc.NoResultFound: raise exception.ActionNotFound(action=action_id) ref.update(values) return ref def soft_delete_action(self, action_id): try: return self._soft_delete(models.Action, action_id) except exception.ResourceNotFound: raise exception.ActionNotFound(action=action_id) # ### ACTION PLANS ### # def get_action_plan_list(self, *args, **kwargs): return self._get_model_list(models.ActionPlan, self._add_action_plans_filters, *args, **kwargs) def create_action_plan(self, values): # ensure defaults are present for new action plans if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: action_plan = self._create(models.ActionPlan, values) except db_exc.DBDuplicateEntry: raise exception.ActionPlanAlreadyExists(uuid=values['uuid']) return action_plan def _get_action_plan(self, context, fieldname, value, eager): try: return self._get(context, model=models.ActionPlan, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ActionPlanNotFound(action_plan=value) def get_action_plan_by_id(self, context, action_plan_id, eager=False): return self._get_action_plan( context, fieldname="id", value=action_plan_id, eager=eager) def get_action_plan_by_uuid(self, context, action_plan_uuid, eager=False): return self._get_action_plan( context, fieldname="uuid", value=action_plan_uuid, eager=eager) def destroy_action_plan(self, action_plan_id): def is_action_plan_referenced(session, action_plan_id): """Checks whether the action_plan is referenced by action(s).""" query = session.query(models.Action) query = self._add_actions_filters( query, {'action_plan_id': action_plan_id}) return query.count() != 0 with _session_for_write() as session: query = session.query(models.ActionPlan) query = add_identity_filter(query, action_plan_id) try: action_plan_ref = query.one() except exc.NoResultFound: raise exception.ActionPlanNotFound(action_plan=action_plan_id) if is_action_plan_referenced(session, action_plan_ref['id']): raise exception.ActionPlanReferenced( action_plan=action_plan_id) query.delete() def update_action_plan(self, action_plan_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "Action Plan.")) return self._do_update_action_plan(action_plan_id, values) @staticmethod def _do_update_action_plan(action_plan_id, values): with _session_for_write() as session: query = session.query(models.ActionPlan) query = add_identity_filter(query, action_plan_id) try: ref = query.with_for_update().one() except exc.NoResultFound: raise exception.ActionPlanNotFound(action_plan=action_plan_id) ref.update(values) return ref def soft_delete_action_plan(self, action_plan_id): try: return self._soft_delete(models.ActionPlan, action_plan_id) except exception.ResourceNotFound: raise exception.ActionPlanNotFound(action_plan=action_plan_id) # ### EFFICACY INDICATORS ### # def get_efficacy_indicator_list(self, *args, **kwargs): return self._get_model_list(models.EfficacyIndicator, self._add_efficacy_indicators_filters, *args, **kwargs) def create_efficacy_indicator(self, values): # ensure defaults are present for new efficacy indicators if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: efficacy_indicator = self._create(models.EfficacyIndicator, values) except db_exc.DBDuplicateEntry:
raise exception.EfficacyIndicatorAlreadyExists(uuid=values['uuid']) return efficacy_indicator def _get_efficacy_indicator(self, context, fieldname, value, eager): try: return self._get(context, model=models.EfficacyIndicator, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound(efficacy_indicator=value) def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, eager=False): return self._get_efficacy_indicator( context, fieldname="id", value=efficacy_indicator_id, eager=eager) def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, eager=False): return self._get_efficacy_indicator( context, fieldname="uuid", value=efficacy_indicator_uuid, eager=eager) def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, eager=False): return self._get_efficacy_indicator( context, fieldname="name", value=efficacy_indicator_name, eager=eager) def update_efficacy_indicator(self, efficacy_indicator_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "efficacy indicator.")) try: return self._update( models.EfficacyIndicator, efficacy_indicator_id, values) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound( efficacy_indicator=efficacy_indicator_id) def soft_delete_efficacy_indicator(self, efficacy_indicator_id): try: return self._soft_delete( models.EfficacyIndicator, efficacy_indicator_id) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound( efficacy_indicator=efficacy_indicator_id) def destroy_efficacy_indicator(self, efficacy_indicator_id): try: return self._destroy( models.EfficacyIndicator, efficacy_indicator_id) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound( efficacy_indicator=efficacy_indicator_id) # ### SCORING ENGINES ### # def get_scoring_engine_list(self, *args, **kwargs): return self._get_model_list(models.ScoringEngine, self._add_scoring_engine_filters, *args, **kwargs) def create_scoring_engine(self, values): # ensure defaults are present for new scoring engines if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: scoring_engine = self._create(models.ScoringEngine, values) except db_exc.DBDuplicateEntry: raise exception.ScoringEngineAlreadyExists(uuid=values['uuid']) return scoring_engine def _get_scoring_engine(self, context, fieldname, value, eager): try: return self._get(context, model=models.ScoringEngine, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound(scoring_engine=value) def get_scoring_engine_by_id(self, context, scoring_engine_id, eager=False): return self._get_scoring_engine( context, fieldname="id", value=scoring_engine_id, eager=eager) def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, eager=False): return self._get_scoring_engine( context, fieldname="uuid", value=scoring_engine_uuid, eager=eager) def get_scoring_engine_by_name(self, context, scoring_engine_name, eager=False): return self._get_scoring_engine( context, fieldname="name", value=scoring_engine_name, eager=eager) def destroy_scoring_engine(self, scoring_engine_id): try: return self._destroy(models.ScoringEngine, scoring_engine_id) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound( scoring_engine=scoring_engine_id) def update_scoring_engine(self, scoring_engine_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot 
overwrite UUID for an existing " "Scoring Engine.")) try: return self._update( models.ScoringEngine, scoring_engine_id, values) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound( scoring_engine=scoring_engine_id) def soft_delete_scoring_engine(self, scoring_engine_id): try: return self._soft_delete( models.ScoringEngine, scoring_engine_id) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound( scoring_engine=scoring_engine_id) # ### SERVICES ### # def get_service_list(self, *args, **kwargs): return self._get_model_list(models.Service, self._add_services_filters, *args, **kwargs) def create_service(self, values): try: service = self._create(models.Service, values) except db_exc.DBDuplicateEntry: raise exception.ServiceAlreadyExists(name=values['name'], host=values['host']) return service def _get_service(self, context, fieldname, value, eager): try: return self._get(context, model=models.Service, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=value) def get_service_by_id(self, context, service_id, eager=False): return self._get_service( context, fieldname="id", value=service_id, eager=eager) def get_service_by_name(self, context, service_name, eager=False): return self._get_service( context, fieldname="name", value=service_name, eager=eager) def destroy_service(self, service_id): try: return self._destroy(models.Service, service_id) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=service_id) def update_service(self, service_id, values): try: return self._update(models.Service, service_id, values) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=service_id) def soft_delete_service(self, service_id): try: return self._soft_delete(models.Service, service_id) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=service_id) # ### ACTION_DESCRIPTIONS ### # def get_action_description_list(self, *args, **kwargs): return self._get_model_list(models.ActionDescription, self._add_action_descriptions_filters, *args, **kwargs) def create_action_description(self, values): try: action_description = self._create(models.ActionDescription, values) except db_exc.DBDuplicateEntry: raise exception.ActionDescriptionAlreadyExists( action_type=values['action_type']) return action_description def _get_action_description(self, context, fieldname, value, eager): try: return self._get(context, model=models.ActionDescription, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound(action_id=value) def get_action_description_by_id(self, context, action_id, eager=False): return self._get_action_description( context, fieldname="id", value=action_id, eager=eager) def get_action_description_by_type(self, context, action_type, eager=False): return self._get_action_description( context, fieldname="action_type", value=action_type, eager=eager) def destroy_action_description(self, action_id): try: return self._destroy(models.ActionDescription, action_id) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound( action_id=action_id) def update_action_description(self, action_id, values): try: return self._update(models.ActionDescription, action_id, values) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound( action_id=action_id) def soft_delete_action_description(self, action_id): try: return 
self._soft_delete(models.ActionDescription, action_id) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound( action_id=action_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/job_store.py0000664000175000017500000001234000000000000023401 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica LTD # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle # nosec: B403 from apscheduler.jobstores.base import ConflictingIdError from apscheduler.jobstores import sqlalchemy from apscheduler.util import datetime_to_utc_timestamp from apscheduler.util import maybe_ref from apscheduler.util import utc_timestamp_to_datetime from oslo_serialization import jsonutils from watcher.common import context from watcher.common import service from watcher import objects from sqlalchemy import Table, MetaData, select, and_, null from sqlalchemy.exc import IntegrityError class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore): """Stores jobs in a database table using SQLAlchemy. The table will be created if it doesn't exist in the database. Plugin alias: ``sqlalchemy`` :param str url: connection string :param engine: an SQLAlchemy Engine to use instead of creating a new one based on ``url`` :param str tablename: name of the table to store jobs in :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of creating a new one :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available :param dict tag: tag description """ def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, tag=None): super(WatcherJobStore, self).__init__(url, engine, tablename, metadata, pickle_protocol) metadata = maybe_ref(metadata) or MetaData() self.jobs_t = Table(tablename, metadata, autoload_with=engine) service_ident = service.ServiceHeartbeat.get_service_name() self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]} self.service_id = objects.Service.list(context=context.make_context(), filters=self.tag)[0].id def start(self, scheduler, alias): # Call the 'start' method of SQLAlchemyJobStore's parent class here, # deliberately skipping SQLAlchemyJobStore's own implementation super(self.__class__.__bases__[0], self).start(scheduler, alias) def add_job(self, job): insert = self.jobs_t.insert().values(**{ 'id': job.id, 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol), 'service_id': self.service_id, 'tag': jsonutils.dumps(self.tag) }) try: with self.engine.begin() as conn: conn.execute(insert) except IntegrityError: raise ConflictingIdError(job.id) def get_all_jobs(self): jobs = self._get_jobs(self.jobs_t.c.tag == jsonutils.dumps(self.tag)) self._fix_paused_jobs_sorting(jobs) return jobs def get_next_run_time(self): selectable = select(self.jobs_t.c.next_run_time).\
where(self.jobs_t.c.next_run_time != null()).\ order_by(self.jobs_t.c.next_run_time).limit(1) with self.engine.begin() as connection: # NOTE(danms): The apscheduler implementation of this gets a # decimal.Decimal back from scalar() which causes # utc_timestamp_to_datetime() to choke since it is expecting a # python float. Assume this is SQLAlchemy 2.0 stuff, so just # coerce to a float here. next_run_time = connection.execute(selectable).scalar() return utc_timestamp_to_datetime(float(next_run_time) if next_run_time is not None else None) def _get_jobs(self, *conditions): jobs = [] conditions += (self.jobs_t.c.service_id == self.service_id,) selectable = select( self.jobs_t.c.id, self.jobs_t.c.job_state, self.jobs_t.c.tag ).order_by(self.jobs_t.c.next_run_time).where(and_(*conditions)) failed_job_ids = set() with self.engine.begin() as conn: for row in conn.execute(selectable): try: jobs.append(self._reconstitute_job(row.job_state)) except Exception: self._logger.exception( 'Unable to restore job "%s" -- removing it', row.id) failed_job_ids.add(row.id) # Remove all the jobs we failed to restore if failed_job_ids: delete = self.jobs_t.delete().where( self.jobs_t.c.id.in_(failed_job_ids)) with self.engine.begin() as conn: conn.execute(delete) return jobs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/migration.py0000664000175000017500000000715400000000000023413 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import alembic from alembic import config as alembic_config import alembic.migration as alembic_migration from oslo_db import exception as db_exc from oslo_db.sqlalchemy import enginefacade from watcher._i18n import _ from watcher.db.sqlalchemy import models def _alembic_config(): path = os.path.join(os.path.dirname(__file__), 'alembic.ini') config = alembic_config.Config(path) return config def version(engine=None): """Current database version. :returns: Database version :rtype: string """ if engine is None: engine = enginefacade.reader.get_engine() with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return context.get_current_revision() def upgrade(revision, config=None): """Used for upgrading database. :param revision: Desired database revision :type revision: string """ revision = revision or 'head' config = config or _alembic_config() alembic.command.upgrade(config, revision) def create_schema(config=None, engine=None): """Create database schema from models description. Can be used for initial installation instead of upgrade('head'). """ if engine is None: engine = enginefacade.writer.get_engine() # NOTE(viktors): If we use metadata.create_all() on a non-empty db # schema, it will only add the new tables and leave the # existing ones as-is. So we should avoid this situation.
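# NOTE: typical bootstrap sketch: call create_schema() exactly once on an
# empty database, or upgrade('head') anywhere else; both leave the
# database stamped at the latest revision.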
if version(engine=engine) is not None: raise db_exc.DBMigrationError( _("Watcher database schema is already under version control; " "use upgrade() instead")) models.Base.metadata.create_all(engine) stamp('head', config=config) def downgrade(revision, config=None): """Used for downgrading database. :param revision: Desired database revision :type revision: string """ revision = revision or 'base' config = config or _alembic_config() return alembic.command.downgrade(config, revision) def stamp(revision, config=None): """Stamps database with provided revision. It does not run any migrations. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ config = config or _alembic_config() return alembic.command.stamp(config, revision=revision) def revision(message=None, autogenerate=False, config=None): """Creates template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ config = config or _alembic_config() return alembic.command.revision(config, message=message, autogenerate=autogenerate) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/db/sqlalchemy/models.py0000664000175000017500000002476300000000000022702 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for watcher service """ from oslo_db.sqlalchemy import models from oslo_serialization import jsonutils from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Float from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import LargeBinary from sqlalchemy import Numeric from sqlalchemy import orm from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.types import TypeDecorator, TEXT from sqlalchemy import UniqueConstraint import urllib.parse as urlparse from watcher import conf CONF = conf.CONF def table_args(): engine_name = urlparse.urlparse(CONF.database.connection).scheme if engine_name == 'mysql': return {'mysql_engine': CONF.database.mysql_engine, 'mysql_charset': "utf8"} return None class JsonEncodedType(TypeDecorator): """Abstract base type serialized as json-encoded string in db.""" type = None impl = TEXT def process_bind_param(self, value, dialect): if value is None: # Save default value according to current type to keep the # interface consistent.
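# e.g. for JSONEncodedDict (illustrative values): None is stored as
# '{}', {'foo': 1} is stored as '{"foo": 1}', and a non-dict such as
# [1, 2] raises TypeError.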
value = self.type() elif not isinstance(value, self.type): raise TypeError("%s supposes to store %s objects, but %s given" % (self.__class__.__name__, self.type.__name__, type(value).__name__)) serialized_value = jsonutils.dumps(value) return serialized_value def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class JSONEncodedDict(JsonEncodedType): """Represents dict serialized as json-encoded string in db.""" type = dict class JSONEncodedList(JsonEncodedType): """Represents list serialized as json-encoded string in db.""" type = list class WatcherBase(models.SoftDeleteMixin, models.TimestampMixin, models.ModelBase): metadata = None def as_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d Base = declarative_base(cls=WatcherBase) class Goal(Base): """Represents a goal.""" __tablename__ = 'goals' __table_args__ = ( UniqueConstraint('uuid', name='uniq_goals0uuid'), UniqueConstraint('name', 'deleted', name='uniq_goals0name'), table_args(), ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63), nullable=False) display_name = Column(String(63), nullable=False) efficacy_specification = Column(JSONEncodedList, nullable=False) class Strategy(Base): """Represents a strategy.""" __tablename__ = 'strategies' __table_args__ = ( UniqueConstraint('uuid', name='uniq_strategies0uuid'), UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63), nullable=False) display_name = Column(String(63), nullable=False) goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) parameters_spec = Column(JSONEncodedDict, nullable=True) goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) class AuditTemplate(Base): """Represents an audit template.""" __tablename__ = 'audit_templates' __table_args__ = ( UniqueConstraint('uuid', name='uniq_audit_templates0uuid'), UniqueConstraint('name', 'deleted', name='uniq_audit_templates0name'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) name = Column(String(63), nullable=True) description = Column(String(255), nullable=True) goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) scope = Column(JSONEncodedList) goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) class Audit(Base): """Represents an audit.""" __tablename__ = 'audits' __table_args__ = ( UniqueConstraint('uuid', name='uniq_audits0uuid'), UniqueConstraint('name', 'deleted', name='uniq_audits0name'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63), nullable=True) audit_type = Column(String(20)) state = Column(String(20), nullable=True) parameters = Column(JSONEncodedDict, nullable=True) interval = Column(String(36), nullable=True) goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) scope = Column(JSONEncodedList, nullable=True) auto_trigger = Column(Boolean, nullable=False) next_run_time = Column(DateTime, nullable=True) hostname = Column(String(255), nullable=True) start_time = Column(DateTime, nullable=True) end_time = Column(DateTime, nullable=True) force 
= Column(Boolean, nullable=False) goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) class ActionPlan(Base): """Represents an action plan.""" __tablename__ = 'action_plans' __table_args__ = ( UniqueConstraint('uuid', name='uniq_action_plans0uuid'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) audit_id = Column(Integer, ForeignKey('audits.id'), nullable=False) strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=False) state = Column(String(20), nullable=True) global_efficacy = Column(JSONEncodedList, nullable=True) hostname = Column(String(255), nullable=True) audit = orm.relationship(Audit, foreign_keys=audit_id, lazy=None) strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) class Action(Base): """Represents an action.""" __tablename__ = 'actions' __table_args__ = ( UniqueConstraint('uuid', name='uniq_actions0uuid'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36), nullable=False) action_plan_id = Column(Integer, ForeignKey('action_plans.id'), nullable=False) # only for the first version action_type = Column(String(255), nullable=False) input_parameters = Column(JSONEncodedDict, nullable=True) state = Column(String(20), nullable=True) parents = Column(JSONEncodedList, nullable=True) action_plan = orm.relationship( ActionPlan, foreign_keys=action_plan_id, lazy=None) class EfficacyIndicator(Base): """Represents an efficacy indicator.""" __tablename__ = 'efficacy_indicators' __table_args__ = ( UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63)) description = Column(String(255), nullable=True) unit = Column(String(63), nullable=True) value = Column(Numeric()) action_plan_id = Column(Integer, ForeignKey('action_plans.id'), nullable=False) action_plan = orm.relationship( ActionPlan, foreign_keys=action_plan_id, lazy=None) class ScoringEngine(Base): """Represents a scoring engine.""" __tablename__ = 'scoring_engines' __table_args__ = ( UniqueConstraint('uuid', name='uniq_scoring_engines0uuid'), UniqueConstraint('name', 'deleted', name='uniq_scoring_engines0name'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36), nullable=False) name = Column(String(63), nullable=False) description = Column(String(255), nullable=True) # Metainfo might contain some additional information about the data model. # The format might vary between different models (e.g. be JSON, XML or # even some custom format), the blob type should cover all scenarios. 
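# e.g. (purely illustrative) metainfo might hold
# '{"model": "linear-regression", "features": ["cpu_util"]}' or an
# XML document; nothing in the schema constrains its format.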
metainfo = Column(Text, nullable=True) class Service(Base): """Represents a service entity""" __tablename__ = 'services' __table_args__ = ( UniqueConstraint('host', 'name', 'deleted', name="uniq_services0host0name0deleted"), table_args() ) id = Column(Integer, primary_key=True) name = Column(String(255), nullable=False) host = Column(String(255), nullable=False) last_seen_up = Column(DateTime, nullable=True) class ActionDescription(Base): """Represents a action description""" __tablename__ = 'action_descriptions' __table_args__ = ( UniqueConstraint('action_type', name="uniq_action_description0action_type"), table_args() ) id = Column(Integer, primary_key=True) action_type = Column(String(255), nullable=False) description = Column(String(255), nullable=False) class APScheulerJob(Base): """Represents apscheduler jobs""" __tablename__ = 'apscheduler_jobs' __table_args__ = ( UniqueConstraint('id', name="uniq_apscheduler_jobs0id"), table_args() ) id = Column(String(191), nullable=False, primary_key=True) next_run_time = Column(Float(25), index=True) job_state = Column(LargeBinary, nullable=False) tag = Column(JSONEncodedDict(), nullable=True) service_id = Column(Integer, ForeignKey('services.id'), nullable=False) service = orm.relationship( Service, foreign_keys=service_id, lazy=None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/0000775000175000017500000000000000000000000021434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/__init__.py0000664000175000017500000000000000000000000023533 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/audit/0000775000175000017500000000000000000000000022542 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/audit/__init__.py0000664000175000017500000000000000000000000024641 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/audit/base.py0000664000175000017500000001304500000000000024031 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
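# Illustrative sketch (not part of Watcher): the JSON-encoded column types in
# watcher/db/sqlalchemy/models.py above serialize Python dicts and lists to
# JSON text on write and decode them again on read. A minimal, self-contained
# example of the same pattern; the _Example* names and the in-memory SQLite
# engine are invented for this demonstration.
from oslo_serialization import jsonutils
from sqlalchemy import Column, Integer, Text, TypeDecorator, create_engine
from sqlalchemy.orm import Session, declarative_base


class _ExampleJSONList(TypeDecorator):
    """Store a Python list as a JSON-encoded string, like JSONEncodedList."""
    impl = Text
    cache_ok = True

    def process_bind_param(self, value, dialect):
        # encode on the way into the database
        return jsonutils.dumps(value if value is not None else [])

    def process_result_value(self, value, dialect):
        # decode on the way out of the database
        return jsonutils.loads(value) if value is not None else None


_ExampleBase = declarative_base()


class _ExampleGoal(_ExampleBase):
    __tablename__ = 'example_goals'
    id = Column(Integer, primary_key=True)
    efficacy_specification = Column(_ExampleJSONList, nullable=False)


def _demo_json_roundtrip():
    engine = create_engine('sqlite://')
    _ExampleBase.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(_ExampleGoal(efficacy_specification=[{'name': 'cpu'}]))
        session.commit()
        # the JSON text is transparently decoded back into a Python list
        assert (session.query(_ExampleGoal).one().efficacy_specification
                == [{'name': 'cpu'}])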
#

import abc

from oslo_config import cfg
from oslo_log import log

from watcher.applier import rpcapi
from watcher.common import exception
from watcher.common import service
from watcher.decision_engine.loading import default as loader
from watcher.decision_engine.strategy.context import default as default_context
from watcher import notifications
from watcher import objects
from watcher.objects import fields

CONF = cfg.CONF

LOG = log.getLogger(__name__)


class BaseMetaClass(service.Singleton, abc.ABCMeta):
    pass


class BaseAuditHandler(object, metaclass=BaseMetaClass):

    @abc.abstractmethod
    def execute(self, audit, request_context):
        raise NotImplementedError()

    @abc.abstractmethod
    def pre_execute(self, audit, request_context):
        raise NotImplementedError()

    @abc.abstractmethod
    def do_execute(self, audit, request_context):
        raise NotImplementedError()

    @abc.abstractmethod
    def post_execute(self, audit, solution, request_context):
        raise NotImplementedError()


class AuditHandler(BaseAuditHandler, metaclass=abc.ABCMeta):

    def __init__(self):
        super(AuditHandler, self).__init__()
        self._strategy_context = default_context.DefaultStrategyContext()
        self._planner_loader = loader.DefaultPlannerLoader()
        self.applier_client = rpcapi.ApplierAPI()

    def get_planner(self, solution):
        # because AuditHandler is a singleton we need to avoid a race
        # condition; thus we need to load the planner every time
        planner_name = solution.strategy.planner
        LOG.debug("Loading %s", planner_name)
        planner = self._planner_loader.load(name=planner_name)
        return planner

    @property
    def strategy_context(self):
        return self._strategy_context

    def do_execute(self, audit, request_context):
        # execute the strategy
        solution = self.strategy_context.execute_strategy(
            audit, request_context)
        return solution

    def do_schedule(self, request_context, audit, solution):
        try:
            notifications.audit.send_action_notification(
                request_context, audit,
                action=fields.NotificationAction.PLANNER,
                phase=fields.NotificationPhase.START)
            planner = self.get_planner(solution)
            action_plan = planner.schedule(request_context, audit.id,
                                           solution)
            notifications.audit.send_action_notification(
                request_context, audit,
                action=fields.NotificationAction.PLANNER,
                phase=fields.NotificationPhase.END)
            return action_plan
        except Exception:
            notifications.audit.send_action_notification(
                request_context, audit,
                action=fields.NotificationAction.PLANNER,
                priority=fields.NotificationPriority.ERROR,
                phase=fields.NotificationPhase.ERROR)
            raise

    @staticmethod
    def update_audit_state(audit, state):
        if audit.state != state:
            LOG.debug("Update audit state: %s", state)
            audit.state = state
            audit.save()

    @staticmethod
    def check_ongoing_action_plans(request_context):
        a_plan_filters = {'state': objects.action_plan.State.ONGOING}
        ongoing_action_plans = objects.ActionPlan.list(
            request_context, filters=a_plan_filters)
        if ongoing_action_plans:
            raise exception.ActionPlanIsOngoing(
                action_plan=ongoing_action_plans[0].uuid)

    def pre_execute(self, audit, request_context):
        LOG.debug("Trigger audit %s", audit.uuid)
        # If audit.force is true, the audit will be executed
        # despite any ongoing action plan
        if not audit.force:
            self.check_ongoing_action_plans(request_context)
        # Write the hostname that will execute this audit.
audit.hostname = CONF.host # change state of the audit to ONGOING self.update_audit_state(audit, objects.audit.State.ONGOING) def post_execute(self, audit, solution, request_context): action_plan = self.do_schedule(request_context, audit, solution) if audit.auto_trigger: self.applier_client.launch_action_plan(request_context, action_plan.uuid) def execute(self, audit, request_context): try: self.pre_execute(audit, request_context) solution = self.do_execute(audit, request_context) self.post_execute(audit, solution, request_context) except exception.ActionPlanIsOngoing as e: LOG.warning(e) if audit.audit_type == objects.audit.AuditType.ONESHOT.value: self.update_audit_state(audit, objects.audit.State.CANCELLED) except Exception as e: LOG.exception(e) self.update_audit_state(audit, objects.audit.State.FAILED) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/audit/continuous.py0000664000175000017500000002243700000000000025332 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica LTD # Copyright (c) 2016 Intel Corp # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from croniter import croniter from dateutil import tz from oslo_utils import timeutils from watcher.common import context from watcher.common import scheduling from watcher.common import utils from watcher import conf from watcher.db.sqlalchemy import api as sq_api from watcher.db.sqlalchemy import job_store from watcher.decision_engine.audit import base from watcher import objects CONF = conf.CONF class ContinuousAuditHandler(base.AuditHandler): def __init__(self): super(ContinuousAuditHandler, self).__init__() # scheduler for executing audits self._audit_scheduler = None # scheduler for a periodic task to launch audit self._period_scheduler = None self.context_show_deleted = context.RequestContext(is_admin=True, show_deleted=True) @property def scheduler(self): if self._audit_scheduler is None: self._audit_scheduler = scheduling.BackgroundSchedulerService( jobstores={ 'default': job_store.WatcherJobStore( engine=sq_api.enginefacade.writer.get_engine()), } ) return self._audit_scheduler @property def period_scheduler(self): if self._period_scheduler is None: self._period_scheduler = scheduling.BackgroundSchedulerService() return self._period_scheduler def _is_audit_inactive(self, audit): audit = objects.Audit.get_by_uuid( self.context_show_deleted, audit.uuid, eager=True) if (objects.audit.AuditStateTransitionManager().is_inactive(audit) or (audit.hostname != CONF.host) or (self.check_audit_expired(audit))): # if audit isn't in active states, audit's job must be removed to # prevent using of inactive audit in future. 
            jobs = [job for job in self.scheduler.get_jobs()
                    if job.name == 'execute_audit' and
                    job.args[0].uuid == audit.uuid]
            if jobs:
                jobs[0].remove()
            return True
        return False

    def do_execute(self, audit, request_context):
        solution = super(ContinuousAuditHandler, self)\
            .do_execute(audit, request_context)

        if audit.audit_type == objects.audit.AuditType.CONTINUOUS.value:
            a_plan_filters = {'audit_uuid': audit.uuid,
                              'state': objects.action_plan.State.RECOMMENDED}
            action_plans = objects.ActionPlan.list(
                request_context, filters=a_plan_filters, eager=True)
            for plan in action_plans:
                plan.state = objects.action_plan.State.CANCELLED
                plan.save()
        return solution

    @staticmethod
    def _next_cron_time(audit):
        if utils.is_cron_like(audit.interval):
            return croniter(audit.interval, timeutils.utcnow()
                            ).get_next(datetime.datetime)

    @classmethod
    def execute_audit(cls, audit, request_context):
        self = cls()
        if not self._is_audit_inactive(audit):
            try:
                self.execute(audit, request_context)
            except Exception:
                raise
            finally:
                if utils.is_int_like(audit.interval):
                    audit.next_run_time = (
                        timeutils.utcnow() +
                        datetime.timedelta(seconds=int(audit.interval)))
                else:
                    audit.next_run_time = self._next_cron_time(audit)
                audit.save()

    def _add_job(self, trigger, audit, audit_context, **trigger_args):
        time_var = 'next_run_time' if trigger_args.get(
            'next_run_time') else 'run_date'
        # We should convert UTC time to local time without tzinfo
        trigger_args[time_var] = trigger_args[time_var].replace(
            tzinfo=tz.tzutc()).astimezone(tz.tzlocal()).replace(tzinfo=None)
        self.scheduler.add_job(self.execute_audit, trigger,
                               args=[audit, audit_context],
                               name='execute_audit',
                               **trigger_args)

    def check_audit_expired(self, audit):
        current = timeutils.utcnow()
        # Note: if the audit has not yet entered its timeframe,
        # skip it
        if audit.start_time and audit.start_time > current:
            return True
        if audit.end_time and audit.end_time < current:
            if audit.state != objects.audit.State.SUCCEEDED:
                audit.state = objects.audit.State.SUCCEEDED
                audit.save()
            return True

        return False

    def launch_audits_periodically(self):
        # if the audit scheduler stopped, restart it
        if not self.scheduler.running:
            self.scheduler.start()
        audit_context = context.RequestContext(is_admin=True)
        audit_filters = {
            'audit_type': objects.audit.AuditType.CONTINUOUS.value,
            'state__in': (objects.audit.State.PENDING,
                          objects.audit.State.ONGOING),
        }
        audit_filters['hostname'] = None
        unscheduled_audits = objects.Audit.list(
            audit_context, filters=audit_filters, eager=True)
        for audit in unscheduled_audits:
            # If a continuous audit doesn't have a hostname yet,
            # Watcher will set it to the current CONF.host value.
            # TODO(alexchadin): Add scheduling of new continuous audits.
            audit.hostname = CONF.host
            audit.save()
        scheduler_job_args = [
            (job.args[0].uuid, job) for job in self.scheduler.get_jobs()
            if job.name == 'execute_audit']
        scheduler_jobs = dict(scheduler_job_args)
        # if the audit isn't in an active state, its job should be removed
        jobs_to_remove = []
        for job in scheduler_jobs.values():
            if self._is_audit_inactive(job.args[0]):
                jobs_to_remove.append(job.args[0].uuid)
        for audit_uuid in jobs_to_remove:
            scheduler_jobs.pop(audit_uuid)
        audit_filters['hostname'] = CONF.host
        audits = objects.Audit.list(
            audit_context, filters=audit_filters, eager=True)
        for audit in audits:
            if self.check_audit_expired(audit):
                continue
            existing_job = scheduler_jobs.get(audit.uuid, None)
            # if the audit is not present in the scheduled audits yet,
            # just add a new audit job.
# if audit is already in the job queue, and interval has changed, # we need to remove the old job and add a new one. if (existing_job is None) or ( existing_job and audit.interval != existing_job.args[0].interval): if existing_job: self.scheduler.remove_job(existing_job.id) # if interval is provided with seconds if utils.is_int_like(audit.interval): # if audit has already been provided and we need # to restore it after shutdown if audit.next_run_time is not None: old_run_time = audit.next_run_time current = timeutils.utcnow() if old_run_time < current: delta = datetime.timedelta( seconds=(int(audit.interval) - ( current - old_run_time).seconds % int(audit.interval))) audit.next_run_time = current + delta next_run_time = audit.next_run_time # if audit is new one else: next_run_time = timeutils.utcnow() self._add_job('interval', audit, audit_context, seconds=int(audit.interval), next_run_time=next_run_time) else: audit.next_run_time = self._next_cron_time(audit) self._add_job('date', audit, audit_context, run_date=audit.next_run_time) audit.hostname = CONF.host audit.save() def start(self): self.period_scheduler.add_job( self.launch_audits_periodically, 'interval', seconds=CONF.watcher_decision_engine.continuous_audit_interval, next_run_time=datetime.datetime.now()) self.period_scheduler.start() # audit scheduler start self.scheduler.start() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/audit/event.py0000664000175000017500000000203100000000000024231 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.audit import base from watcher import objects class EventAuditHandler(base.AuditHandler): def post_execute(self, audit, solution, request_context): super(EventAuditHandler, self).post_execute(audit, solution, request_context) # change state of the audit to SUCCEEDED self.update_audit_state(audit, objects.audit.State.SUCCEEDED) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/audit/oneshot.py0000664000175000017500000000202600000000000024573 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
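# Illustrative sketch (not part of Watcher): when restoring an interval-based
# audit after a shutdown, ContinuousAuditHandler.launch_audits_periodically
# above advances next_run_time past "now" while keeping the original cadence.
# A minimal, self-contained rendering of that catch-up arithmetic; the
# function and demo names are invented, and the modulo over .seconds mirrors
# the expression used in the handler.
import datetime


def _next_slot_after_downtime(next_run_time, now, interval_seconds):
    """Return the first run time on the old cadence at or after ``now``."""
    if next_run_time >= now:
        return next_run_time
    late = (now - next_run_time).seconds % interval_seconds
    return now + datetime.timedelta(seconds=interval_seconds - late)


def _demo_catch_up():
    old = datetime.datetime(2024, 1, 1, 12, 0, 0)    # slot missed while down
    now = datetime.datetime(2024, 1, 1, 12, 0, 50)   # 50s late, 60s cadence
    # the next slot on the original cadence is 10 seconds from now
    assert _next_slot_after_downtime(old, now, 60) == \
        now + datetime.timedelta(seconds=10)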
from watcher.decision_engine.audit import base from watcher import objects class OneShotAuditHandler(base.AuditHandler): def post_execute(self, audit, solution, request_context): super(OneShotAuditHandler, self).post_execute(audit, solution, request_context) # change state of the audit to SUCCEEDED self.update_audit_state(audit, objects.audit.State.SUCCEEDED) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/datasources/0000775000175000017500000000000000000000000023751 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/__init__.py0000664000175000017500000000000000000000000026050 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/base.py0000664000175000017500000002303200000000000025235 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import time from oslo_config import cfg from oslo_log import log from watcher.common import exception CONF = cfg.CONF LOG = log.getLogger(__name__) class DataSourceBase(object): """Base Class for datasources in Watcher This base class defines the abstract methods that datasources should implement and contains details on the values expected for parameters as well as what the values for return types should be. 
""" """Possible options for the parameters named aggregate""" AGGREGATES = ['mean', 'min', 'max', 'count'] """Possible options for the parameters named resource_type""" RESOURCE_TYPES = ['compute_node', 'instance', 'bare_metal', 'storage'] """Each datasource should have a uniquely identifying name""" NAME = '' """Possible metrics a datasource can support and their internal name""" METRIC_MAP = dict(host_cpu_usage=None, host_ram_usage=None, host_outlet_temp=None, host_inlet_temp=None, host_airflow=None, host_power=None, instance_cpu_usage=None, instance_ram_usage=None, instance_ram_allocated=None, instance_l3_cache_usage=None, instance_root_disk_size=None, ) def _get_meter(self, meter_name): """Retrieve the meter from the metric map or raise error""" meter = self.METRIC_MAP.get(meter_name) if meter is None: raise exception.MetricNotAvailable(metric=meter_name) return meter def query_retry(self, f, *args, ignored_exc=None, **kwargs): """Attempts to retrieve metrics from the external service Attempts to access data from the external service and handles exceptions upon exception the retrieval should be retried in accordance to the value of query_max_retries :param f: The method that performs the actual querying for metrics :param args: Array of arguments supplied to the method :param ignored_exc: An exception or tuple of exceptions that shouldn't be retried, for example "NotFound" exceptions. :param kwargs: The amount of arguments supplied to the method :return: The value as retrieved from the external service """ num_retries = CONF.watcher_datasources.query_max_retries timeout = CONF.watcher_datasources.query_timeout ignored_exc = ignored_exc or tuple() for i in range(num_retries): try: return f(*args, **kwargs) except ignored_exc as e: LOG.debug("Got an ignored exception (%s) while calling: %s ", e, f) return except Exception as e: LOG.exception(e) self.query_retry_reset(e) LOG.warning("Retry %d of %d while retrieving metrics retry " "in %d seconds", i+1, num_retries, timeout) time.sleep(timeout) @abc.abstractmethod def query_retry_reset(self, exception_instance): """Abstract method to perform reset operations upon request failure""" pass @abc.abstractmethod def list_metrics(self): """Returns the supported metrics that the datasource can retrieve :return: List of supported metrics containing keys from METRIC_MAP """ pass @abc.abstractmethod def check_availability(self): """Tries to contact the datasource to see if it is available :return: True or False with true meaning the datasource is available """ pass @abc.abstractmethod def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): """Retrieves and converts metrics based on the specified parameters :param resource: Resource object as defined in watcher models such as ComputeNode and Instance :param resource_type: Indicates which type of object is supplied to the resource parameter :param meter_name: The desired metric to retrieve as key from METRIC_MAP :param period: Time span to collect metrics from in seconds :param granularity: Interval between samples in measurements in seconds :param aggregate: Aggregation method to extract value from set of samples :return: The gathered value for the metric the type of value depends on the meter_name """ pass @abc.abstractmethod def statistic_series(self, resource=None, resource_type=None, meter_name=None, start_time=None, end_time=None, granularity=300): """Retrieves metrics based on the specified parameters over a period 
:param resource: Resource object as defined in watcher models such as ComputeNode and Instance :param resource_type: Indicates which type of object is supplied to the resource parameter :param meter_name: The desired metric to retrieve as key from METRIC_MAP :param start_time: The datetime to start retrieving metrics for :type start_time: datetime.datetime :param end_time: The datetime to limit the retrieval of metrics to :type end_time: datetime.datetime :param granularity: Interval between samples in measurements in seconds :return: Dictionary of key value pairs with timestamps and metric values """ pass @abc.abstractmethod def get_host_cpu_usage(self, resource, period, aggregate, granularity=None): """Get the cpu usage for a host such as a compute_node :return: cpu usage as float ranging between 0 and 100 representing the total cpu usage as percentage """ pass @abc.abstractmethod def get_host_ram_usage(self, resource, period, aggregate, granularity=None): """Get the ram usage for a host such as a compute_node :return: ram usage as float in megabytes """ pass @abc.abstractmethod def get_host_outlet_temp(self, resource, period, aggregate, granularity=None): """Get the outlet temperature for a host such as compute_node :return: outlet temperature as float in degrees celsius """ pass @abc.abstractmethod def get_host_inlet_temp(self, resource, period, aggregate, granularity=None): """Get the inlet temperature for a host such as compute_node :return: inlet temperature as float in degrees celsius """ pass @abc.abstractmethod def get_host_airflow(self, resource, period, aggregate, granularity=None): """Get the airflow for a host such as compute_node :return: airflow as float in cfm """ pass @abc.abstractmethod def get_host_power(self, resource, period, aggregate, granularity=None): """Get the power for a host such as compute_node :return: power as float in watts """ pass @abc.abstractmethod def get_instance_cpu_usage(self, resource, period, aggregate, granularity=None): """Get the cpu usage for an instance :return: cpu usage as float ranging between 0 and 100 representing the total cpu usage as percentage """ pass @abc.abstractmethod def get_instance_ram_usage(self, resource, period, aggregate, granularity=None): """Get the ram usage for an instance :return: ram usage as float in megabytes """ pass @abc.abstractmethod def get_instance_ram_allocated(self, resource, period, aggregate, granularity=None): """Get the ram allocated for an instance :return: total ram allocated as float in megabytes """ pass @abc.abstractmethod def get_instance_l3_cache_usage(self, resource, period, aggregate, granularity=None): """Get the l3 cache usage for an instance :return: l3 cache usage as integer in bytes """ pass @abc.abstractmethod def get_instance_root_disk_size(self, resource, period, aggregate, granularity=None): """Get the size of the root disk for an instance :return: root disk size as float in gigabytes """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/gnocchi.py0000664000175000017500000002427000000000000025742 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import timedelta from gnocchiclient import exceptions as gnc_exc from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from watcher.common import clients from watcher.decision_engine.datasources import base CONF = cfg.CONF LOG = log.getLogger(__name__) class GnocchiHelper(base.DataSourceBase): NAME = 'gnocchi' METRIC_MAP = dict(host_cpu_usage='compute.node.cpu.percent', host_ram_usage='hardware.memory.used', host_outlet_temp='hardware.ipmi.node.outlet_temperature', host_inlet_temp='hardware.ipmi.node.temperature', host_airflow='hardware.ipmi.node.airflow', host_power='hardware.ipmi.node.power', instance_cpu_usage='cpu', instance_ram_usage='memory.resident', instance_ram_allocated='memory', instance_l3_cache_usage='cpu_l3_cache', instance_root_disk_size='disk.root.size', ) def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.gnocchi = self.osc.gnocchi() def check_availability(self): status = self.query_retry(self.gnocchi.status.get) if status: return 'available' else: return 'not available' def list_metrics(self): """List the user's meters.""" response = self.query_retry(f=self.gnocchi.metric.list) if not response: return set() else: return set([metric['name'] for metric in response]) def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): stop_time = timeutils.utcnow() start_time = stop_time - timedelta(seconds=(int(period))) meter = self._get_meter(meter_name) if aggregate == 'count': aggregate = 'mean' LOG.warning('aggregate type count not supported by gnocchi,' ' replaced with mean.') resource_id = resource.uuid if resource_type == 'compute_node': resource_id = "%s_%s" % (resource.hostname, resource.hostname) kwargs = dict(query={"=": {"original_resource_id": resource_id}}, limit=1) resources = self.query_retry( f=self.gnocchi.resource.search, ignored_exc=gnc_exc.NotFound, **kwargs) if not resources: LOG.warning("The %s resource %s could not be found", self.NAME, resource_id) return resource_id = resources[0]['id'] if meter_name == "instance_cpu_usage": if resource_type != "instance": LOG.warning("Unsupported resource type for metric " "'instance_cpu_usage': %s", resource_type) return # The "cpu_util" gauge (percentage) metric has been removed. # We're going to obtain the same result by using the rate of change # aggregate operation. if aggregate not in ("mean", "rate:mean"): LOG.warning("Unsupported aggregate for instance_cpu_usage " "metric: %s. " "Supported aggregates: mean, rate:mean ", aggregate) return # TODO(lpetrut): consider supporting other aggregates. 
aggregate = "rate:mean" raw_kwargs = dict( metric=meter, start=start_time, stop=stop_time, resource_id=resource_id, granularity=granularity, aggregation=aggregate, ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} statistics = self.query_retry( f=self.gnocchi.metric.get_measures, ignored_exc=gnc_exc.NotFound, **kwargs) return_value = None if statistics: # return value of latest measure # measure has structure [time, granularity, value] return_value = statistics[-1][2] if meter_name == 'host_airflow': # Airflow from hardware.ipmi.node.airflow is reported as # 1/10 th of actual CFM return_value *= 10 if meter_name == "instance_cpu_usage": # "rate:mean" can return negative values for migrated vms. return_value = max(0, return_value) # We're converting the cumulative cpu time (ns) to cpu usage # percentage. vcpus = resource.vcpus if not vcpus: LOG.warning("instance vcpu count not set, assuming 1") vcpus = 1 return_value *= 100 / (granularity * 10e+8) / vcpus return return_value def statistic_series(self, resource=None, resource_type=None, meter_name=None, start_time=None, end_time=None, granularity=300): meter = self._get_meter(meter_name) resource_id = resource.uuid if resource_type == 'compute_node': resource_id = "%s_%s" % (resource.hostname, resource.hostname) kwargs = dict(query={"=": {"original_resource_id": resource_id}}, limit=1) resources = self.query_retry( f=self.gnocchi.resource.search, ignored_exc=gnc_exc.NotFound, **kwargs) if not resources: LOG.warning("The %s resource %s could not be found", self.NAME, resource_id) return resource_id = resources[0]['id'] raw_kwargs = dict( metric=meter, start=start_time, stop=end_time, resource_id=resource_id, granularity=granularity, ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} statistics = self.query_retry( f=self.gnocchi.metric.get_measures, ignored_exc=gnc_exc.NotFound, **kwargs) return_value = None if statistics: # measure has structure [time, granularity, value] if meter_name == 'host_airflow': # Airflow from hardware.ipmi.node.airflow is reported as # 1/10 th of actual CFM return_value = {s[0]: s[2]*10 for s in statistics} else: return_value = {s[0]: s[2] for s in statistics} return return_value def get_host_cpu_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period, aggregate, granularity) def get_host_ram_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_ram_usage', period, aggregate, granularity) def get_host_outlet_temp(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_outlet_temp', period, aggregate, granularity) def get_host_inlet_temp(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_inlet_temp', period, aggregate, granularity) def get_host_airflow(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_airflow', period, aggregate, granularity) def get_host_power(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_power', period, aggregate, granularity) def get_instance_cpu_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period, aggregate, granularity) def get_instance_ram_usage(self, 
resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_ram_usage', period, aggregate, granularity) def get_instance_ram_allocated(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_ram_allocated', period, aggregate, granularity) def get_instance_l3_cache_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_l3_cache_usage', period, aggregate, granularity) def get_instance_root_disk_size(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_root_disk_size', period, aggregate, granularity) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/grafana.py0000664000175000017500000002404700000000000025731 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from urllib import parse as urlparse from http import HTTPStatus from oslo_config import cfg from oslo_log import log from watcher._i18n import _ from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import base from watcher.decision_engine.datasources.grafana_translator import influxdb import requests CONF = cfg.CONF LOG = log.getLogger(__name__) class GrafanaHelper(base.DataSourceBase): NAME = 'grafana' """METRIC_MAP is only available at runtime _build_metric_map""" METRIC_MAP = dict() """All available translators""" TRANSLATOR_LIST = [ influxdb.InfluxDBGrafanaTranslator.NAME ] def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.nova = self.osc.nova() self.configured = False self._base_url = None self._headers = None self._setup() def _setup(self): """Configure grafana helper to perform requests""" token = CONF.grafana_client.token base_url = CONF.grafana_client.base_url if not token: LOG.critical("GrafanaHelper authentication token not configured") return self._headers = {"Authorization": "Bearer " + token, "Content-Type": "Application/json"} if not base_url: LOG.critical("GrafanaHelper url not properly configured, " "check base_url") return self._base_url = base_url # Very basic url parsing parse = urlparse.urlparse(self._base_url) if parse.scheme == '' or parse.netloc == '' or parse.path == '': LOG.critical("GrafanaHelper url not properly configured, " "check base_url and project_id") return self._build_metric_map() if len(self.METRIC_MAP) == 0: LOG.critical("GrafanaHelper not configured for any metrics") self.configured = True def _build_metric_map(self): """Builds the metric map by reading config information""" for key, value in CONF.grafana_client.database_map.items(): try: project = 
CONF.grafana_client.project_id_map[key] attribute = CONF.grafana_client.attribute_map[key] translator = CONF.grafana_client.translator_map[key] query = CONF.grafana_client.query_map[key] if project is not None and \ value is not None and\ translator in self.TRANSLATOR_LIST and\ query is not None: self.METRIC_MAP[key] = { 'db': value, 'project': project, 'attribute': attribute, 'translator': translator, 'query': query } except KeyError as e: LOG.error(e) def _build_translator_schema(self, metric, db, attribute, query, resource, resource_type, period, aggregate, granularity): """Create dictionary to pass to grafana proxy translators""" return {'metric': metric, 'db': db, 'attribute': attribute, 'query': query, 'resource': resource, 'resource_type': resource_type, 'period': period, 'aggregate': aggregate, 'granularity': granularity} def _get_translator(self, name, data): """Use the names of translators to get the translator for the metric""" if name == influxdb.InfluxDBGrafanaTranslator.NAME: return influxdb.InfluxDBGrafanaTranslator(data) else: raise exception.InvalidParameter( parameter='name', parameter_type='grafana translator') def _request(self, params, project_id): """Make the request to the endpoint to retrieve data If the request fails, determines what error to raise. """ if self.configured is False: raise exception.DataSourceNotAvailable(self.NAME) resp = requests.get(self._base_url + str(project_id) + '/query', params=params, headers=self._headers, timeout=CONF.grafana_client.http_timeout) if resp.status_code == HTTPStatus.OK: return resp elif resp.status_code == HTTPStatus.BAD_REQUEST: LOG.error("Query for metric is invalid") elif resp.status_code == HTTPStatus.UNAUTHORIZED: LOG.error("Authorization token is invalid") raise exception.DataSourceNotAvailable(self.NAME) def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): """Get the value for the specific metric based on specified parameters """ try: self.METRIC_MAP[meter_name] except KeyError: LOG.error( "Metric: %s does not appear in the current Grafana metric map", meter_name) raise exception.MetricNotAvailable(metric=meter_name) db = self.METRIC_MAP[meter_name]['db'] project = self.METRIC_MAP[meter_name]['project'] attribute = self.METRIC_MAP[meter_name]['attribute'] translator_name = self.METRIC_MAP[meter_name]['translator'] query = self.METRIC_MAP[meter_name]['query'] data = self._build_translator_schema( meter_name, db, attribute, query, resource, resource_type, period, aggregate, granularity) translator = self._get_translator(translator_name, data) params = translator.build_params() raw_kwargs = dict( params=params, project_id=project, ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} resp = self.query_retry(self._request, **kwargs) if not resp: LOG.warning("Datasource %s is not available.", self.NAME) return result = translator.extract_result(resp.content) return result def statistic_series(self, resource=None, resource_type=None, meter_name=None, start_time=None, end_time=None, granularity=300): raise NotImplementedError( _('Grafana helper does not support statistic series method')) def get_host_cpu_usage(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period, aggregate, granularity) def get_host_ram_usage(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'compute_node', 
'host_ram_usage', period, aggregate, granularity) def get_host_outlet_temp(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_outlet_temp', period, aggregate, granularity) def get_host_inlet_temp(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_inlet_temp', period, aggregate, granularity) def get_host_airflow(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_airflow', period, aggregate, granularity) def get_host_power(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_power', period, aggregate, granularity) def get_instance_cpu_usage(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period, aggregate, granularity) def get_instance_ram_usage(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_ram_usage', period, aggregate, granularity) def get_instance_ram_allocated(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_ram_allocated', period, aggregate, granularity) def get_instance_l3_cache_usage(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_l3_cache_usage', period, aggregate, granularity) def get_instance_root_disk_size(self, resource, period=300, aggregate="mean", granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_root_disk_size', period, aggregate, granularity) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/datasources/grafana_translator/0000775000175000017500000000000000000000000027621 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/grafana_translator/__init__.py0000664000175000017500000000000000000000000031720 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/grafana_translator/base.py0000664000175000017500000001071200000000000031106 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
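# Illustrative sketch (not part of Watcher): GrafanaHelper._build_metric_map
# in grafana.py above only registers a metric when it has consistent entries
# across the database_map, project_id_map, attribute_map, translator_map and
# query_map options. A hypothetical watcher.conf fragment wiring
# host_cpu_usage to an InfluxDB datasource proxied by Grafana; every value
# below is invented for illustration, and the query placeholders follow
# _query_format in grafana_translator/base.py: {0}=aggregate, {1}=resource
# attribute, {2}=period, {3}=granularity, {4}=translator-specific value
# (the retention period for InfluxDB).
#
#   [grafana_client]
#   token = <grafana API bearer token>
#   base_url = https://grafana.example.org/api/datasources/proxy/
#   database_map = host_cpu_usage:telegraf_db
#   project_id_map = host_cpu_usage:7
#   attribute_map = host_cpu_usage:hostname
#   translator_map = host_cpu_usage:influxdb
#   query_map = host_cpu_usage:SELECT {0}("usage_system") FROM "{4}"."cpu" WHERE "host" = '{1}' AND time > now() - {2}s GROUP BY time({3}s)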
#

import abc

from watcher._i18n import _
from watcher.common import exception
from watcher.decision_engine.datasources import base


class BaseGrafanaTranslator(object):
    """Grafana translator base class to use with Grafana for different
    databases

    Specific databases that are proxied through Grafana require some
    alterations depending on the database.
    """

    """
    data {
        metric: name of the metric as found in DataSourceBase.METRIC_MAP,
        db: database specified for this metric in grafana_client config
        options,
        attribute: the piece of information that will be selected from the
        resource object to build the query.
        query: the unformatted query from the configuration for this metric,
        resource: the object from the OpenStackClient
        resource_type: the type of the resource ['compute_node','instance',
        'bare_metal', 'storage'],
        period: the period of time to collect metrics for in seconds,
        aggregate: the aggregation can be any from ['mean', 'max', 'min',
        'count'],
        granularity: interval between datapoints in seconds (optional),
    }
    """

    """Every grafana translator should have a uniquely identifying name"""
    NAME = ''

    RESOURCE_TYPES = base.DataSourceBase.RESOURCE_TYPES

    AGGREGATES = base.DataSourceBase.AGGREGATES

    def __init__(self, data):
        self._data = data
        self._validate_data()

    def _validate_data(self):
        """Iterate through the supplied data and verify attributes"""
        optionals = ['granularity']
        reference_data = {
            'metric': None,
            'db': None,
            'attribute': None,
            'query': None,
            'resource': None,
            'resource_type': None,
            'period': None,
            'aggregate': None,
            'granularity': None
        }
        reference_data.update(self._data)
        for key, value in reference_data.items():
            if value is None and key not in optionals:
                raise exception.InvalidParameter(
                    message=(_("The value %(value)s for parameter "
                               "%(parameter)s is invalid") %
                             {'value': None, 'parameter': key}
                             )
                )

        if reference_data['resource_type'] not in self.RESOURCE_TYPES:
            raise exception.InvalidParameter(parameter='resource_type',
                                             parameter_type='RESOURCE_TYPES')

        if reference_data['aggregate'] not in self.AGGREGATES:
            raise exception.InvalidParameter(parameter='aggregate',
                                             parameter_type='AGGREGATES')

    @staticmethod
    def _extract_attribute(resource, attribute):
        """Retrieve the desired attribute from the resource

        :param resource: The resource object to extract the attribute from.
        :param attribute: The name of the attribute to extract, as a string.
        :return: The extracted attribute
        """
        try:
            return getattr(resource, attribute)
        except AttributeError:
            raise

    @staticmethod
    def _query_format(query, aggregate, resource, period, granularity,
                      translator_specific):
        return query.format(aggregate, resource, period, granularity,
                            translator_specific)

    @abc.abstractmethod
    def build_params(self):
        """Build the set of parameters to send with the request"""
        raise NotImplementedError()

    @abc.abstractmethod
    def extract_result(self, raw_results):
        """Extract the metric from the raw results of the request"""
        raise NotImplementedError()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/decision_engine/datasources/grafana_translator/influxdb.py0000664000175000017500000000624000000000000032010 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2019 European Organization for Nuclear Research (CERN)
#
# Authors: Corne Lukken
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils

from watcher.common import exception
from watcher.decision_engine.datasources.grafana_translator.base import \
    BaseGrafanaTranslator

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
    """Grafana translator to communicate with InfluxDB database"""

    NAME = 'influxdb'

    def __init__(self, data):
        super(InfluxDBGrafanaTranslator, self).__init__(data)

    def build_params(self):
        """Build the InfluxDB query parameters to send with the request"""
        data = self._data

        retention_period = None
        available_periods = CONF.grafana_translators.retention_periods.items()
        for key, value in sorted(available_periods, key=lambda x: x[1]):
            if int(data['period']) < int(value):
                retention_period = key
                break

        if retention_period is None:
            retention_period = max(available_periods)[0]
            LOG.warning("Longest retention period is too short for the"
                        " desired period")

        try:
            resource = self._extract_attribute(
                data['resource'], data['attribute'])
        except AttributeError:
            LOG.error("Resource: %s does not contain attribute %s",
                      data['resource'], data['attribute'])
            raise

        # Granularity is optional; if it is None, the minimal value for
        # InfluxDB (1) will be used
        granularity = \
            data['granularity'] if data['granularity'] is not None else 1

        return {'db': data['db'],
                'epoch': 'ms',
                'q': self._query_format(
                    data['query'], data['aggregate'], resource,
                    data['period'], granularity, retention_period)}

    def extract_result(self, raw_results):
        """Extract the metric value from the raw InfluxDB response"""
        try:
            # For result structure see:
            # https://docs.openstack.org/watcher/latest/datasources/grafana.html#InfluxDB
            result = jsonutils.loads(raw_results)
            result = result['results'][0]['series'][0]
            index_aggregate = result['columns'].index(self._data['aggregate'])
            return result['values'][0][index_aggregate]
        except KeyError:
            LOG.error("Could not extract %s for the resource: %s",
                      self._data['metric'], self._data['resource'])
            raise exception.NoSuchMetricForHost(
                metric=self._data['metric'], host=self._data['resource'])
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/decision_engine/datasources/manager.py0000664000175000017500000001341100000000000025735 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
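# Illustrative sketch (not part of Watcher): extract_result in influxdb.py
# above walks the standard InfluxDB JSON envelope (results -> series ->
# columns/values) and picks the column named after the requested aggregate.
# A self-contained demonstration with a fabricated response payload; the
# numbers and helper names are invented.
from oslo_serialization import jsonutils as _demo_jsonutils

_DEMO_INFLUX_RESPONSE = _demo_jsonutils.dumps({
    "results": [{
        "series": [{
            "name": "cpu",
            "columns": ["time", "mean"],
            "values": [[1559805600000, 12.5]],
        }]
    }]
})


def _demo_extract(raw_results, aggregate='mean'):
    # same traversal as InfluxDBGrafanaTranslator.extract_result
    series = _demo_jsonutils.loads(raw_results)['results'][0]['series'][0]
    return series['values'][0][series['columns'].index(aggregate)]


assert _demo_extract(_DEMO_INFLUX_RESPONSE) == 12.5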
import os import yaml from collections import OrderedDict from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.decision_engine.datasources import gnocchi as gnoc from watcher.decision_engine.datasources import grafana as graf from watcher.decision_engine.datasources import monasca as mon from watcher.decision_engine.datasources import prometheus as prom LOG = log.getLogger(__name__) class DataSourceManager(object): metric_map = OrderedDict([ (gnoc.GnocchiHelper.NAME, gnoc.GnocchiHelper.METRIC_MAP), (mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP), (graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP), (prom.PrometheusHelper.NAME, prom.PrometheusHelper.METRIC_MAP), ]) """Dictionary with all possible datasources, dictionary order is the default order for attempting to use datasources """ def __init__(self, config=None, osc=None): self.osc = osc self.config = config self._monasca = None self._gnocchi = None self._grafana = None self._prometheus = None # Dynamically update grafana metric map, only available at runtime # The metric map can still be overridden by a yaml config file self.metric_map[graf.GrafanaHelper.NAME] = self.grafana.METRIC_MAP metric_map_path = cfg.CONF.watcher_decision_engine.metric_map_path metrics_from_file = self.load_metric_map(metric_map_path) for ds, mp in self.metric_map.items(): try: self.metric_map[ds].update(metrics_from_file.get(ds, {})) except KeyError: msgargs = (ds, self.metric_map.keys()) LOG.warning('Invalid Datasource: %s. Allowed: %s ', *msgargs) self.datasources = self.config.datasources if self.datasources and mon.MonascaHelper.NAME in self.datasources: LOG.warning('The monasca datasource is deprecated and will be ' 'removed in a future release.') @property def monasca(self): if self._monasca is None: self._monasca = mon.MonascaHelper(osc=self.osc) return self._monasca @monasca.setter def monasca(self, monasca): self._monasca = monasca @property def gnocchi(self): if self._gnocchi is None: self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) return self._gnocchi @gnocchi.setter def gnocchi(self, gnocchi): self._gnocchi = gnocchi @property def grafana(self): if self._grafana is None: self._grafana = graf.GrafanaHelper(osc=self.osc) return self._grafana @grafana.setter def grafana(self, grafana): self._grafana = grafana @property def prometheus(self): if self._prometheus is None: self._prometheus = prom.PrometheusHelper() return self._prometheus @prometheus.setter def prometheus(self, prometheus): self._prometheus = prometheus def get_backend(self, metrics): """Determine the datasource to use from the configuration Iterates over the configured datasources in order to find the first which can support all specified metrics. Upon a missing metric the next datasource is attempted. 
""" if not self.datasources or len(self.datasources) == 0: raise exception.NoDatasourceAvailable if not metrics or len(metrics) == 0: LOG.critical("Can not retrieve datasource without specifying " "list of required metrics.") raise exception.InvalidParameter(parameter='metrics', parameter_type='none empty list') for datasource in self.datasources: no_metric = False for metric in metrics: if (metric not in self.metric_map[datasource] or self.metric_map[datasource].get(metric) is None): no_metric = True LOG.warning( "Datasource: %s could not be used due to metric: %s", datasource, metric) break if not no_metric: # Try to use a specific datasource but attempt additional # datasources upon exceptions (if config has more datasources) try: ds = getattr(self, datasource) ds.METRIC_MAP.update(self.metric_map[ds.NAME]) return ds except Exception: pass # nosec: B110 raise exception.MetricNotAvailable(metric=metric) def load_metric_map(self, file_path): """Load metrics from the metric_map_path""" if file_path and os.path.exists(file_path): with open(file_path, 'r') as f: try: ret = yaml.safe_load(f.read()) # return {} if the file is empty return ret if ret else {} except yaml.YAMLError as e: LOG.warning('Could not load %s: %s', file_path, e) return {} else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/monasca.py0000664000175000017500000001544700000000000025757 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime from monascaclient import exc from oslo_utils import timeutils from watcher.common import clients from watcher.decision_engine.datasources import base class MonascaHelper(base.DataSourceBase): NAME = 'monasca' METRIC_MAP = dict(host_cpu_usage='cpu.percent', host_ram_usage=None, host_outlet_temp=None, host_inlet_temp=None, host_airflow=None, host_power=None, instance_cpu_usage='vm.cpu.utilization_perc', instance_ram_usage=None, instance_ram_allocated=None, instance_l3_cache_usage=None, instance_root_disk_size=None, ) def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.monasca = self.osc.monasca() def _format_time_params(self, start_time, end_time, period): """Format time-related params to the correct Monasca format :param start_time: Start datetime from which metrics will be used :param end_time: End datetime from which metrics will be used :param period: interval in seconds (int) :return: start ISO time, end ISO time, period """ if not period: period = int(datetime.timedelta(hours=3).total_seconds()) if not start_time: start_time = ( timeutils.utcnow() - datetime.timedelta(seconds=period)) start_timestamp = None if not start_time else start_time.isoformat() end_timestamp = None if not end_time else end_time.isoformat() return start_timestamp, end_timestamp, period def query_retry_reset(self, exception_instance): if isinstance(exception_instance, exc.Unauthorized): self.osc.reset_clients() self.monasca = self.osc.monasca() def check_availability(self): result = self.query_retry(self.monasca.metrics.list) if result: return 'available' else: return 'not available' def list_metrics(self): # TODO(alexchadin): this method should be implemented in accordance to # monasca API. 
pass def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): stop_time = timeutils.utcnow() start_time = stop_time - datetime.timedelta(seconds=(int(period))) meter = self._get_meter(meter_name) if aggregate == 'mean': aggregate = 'avg' raw_kwargs = dict( name=meter, start_time=start_time.isoformat(), end_time=stop_time.isoformat(), dimensions={'hostname': resource.uuid}, period=period, statistics=aggregate, group_by='*', ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} statistics = self.query_retry( f=self.monasca.metrics.list_statistics, **kwargs) cpu_usage = None for stat in statistics: avg_col_idx = stat['columns'].index(aggregate) values = [r[avg_col_idx] for r in stat['statistics']] value = float(sum(values)) / len(values) cpu_usage = value return cpu_usage def statistic_series(self, resource=None, resource_type=None, meter_name=None, start_time=None, end_time=None, granularity=300): meter = self._get_meter(meter_name) raw_kwargs = dict( name=meter, start_time=start_time.isoformat(), end_time=end_time.isoformat(), dimensions={'hostname': resource.uuid}, statistics='avg', group_by='*', ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} statistics = self.query_retry( f=self.monasca.metrics.list_statistics, **kwargs) result = {} for stat in statistics: v_index = stat['columns'].index('avg') t_index = stat['columns'].index('timestamp') result.update({r[t_index]: r[v_index] for r in stat['statistics']}) return result def get_host_cpu_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period, aggregate, granularity) def get_host_ram_usage(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_outlet_temp(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_inlet_temp(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_airflow(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_power(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_cpu_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period, aggregate, granularity) def get_instance_ram_usage(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_ram_allocated(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_l3_cache_usage(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_root_disk_size(self, resource, period, aggregate, granularity=None): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/datasources/prometheus.py0000664000175000017500000005520200000000000026522 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from observabilityclient import prometheus_client from oslo_config import cfg from oslo_log import log import re from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.datasources import base CONF = cfg.CONF LOG = log.getLogger(__name__) class PrometheusHelper(base.DataSourceBase): """PrometheusHelper class for retrieving metrics from Prometheus server This class implements the DataSourceBase to allow Watcher to query Prometheus as a data source for metrics. """ NAME = 'prometheus' METRIC_MAP = dict(host_cpu_usage='node_cpu_seconds_total', host_ram_usage='node_memory_MemAvailable_bytes', host_outlet_temp=None, host_inlet_temp=None, host_airflow=None, host_power=None, instance_cpu_usage='ceilometer_cpu', instance_ram_usage='ceilometer_memory_usage', instance_ram_allocated='instance.memory', instance_l3_cache_usage=None, instance_root_disk_size='instance.disk', ) AGGREGATES_MAP = dict(mean='avg', max='max', min='min', count='avg') def __init__(self): """Initialise the PrometheusHelper The prometheus helper uses the PrometheusAPIClient provided by python-observabilityclient. The prometheus_fqdn_instance_map maps the fqdn of each node to the Prometheus instance label added to all metrics on that node. When making queries to Prometheus we use the instance label to specify the node for which metrics are to be retrieved. host, port and fqdn_label come from the [prometheus_client] section of the watcher config. The prometheus_fqdn_label allows override of the required label in Prometheus scrape configs that specifies each target's fqdn. """ self.prometheus = self._setup_prometheus_client() self.prometheus_fqdn_label = ( CONF.prometheus_client.fqdn_label ) self.prometheus_fqdn_instance_map = ( self._build_prometheus_fqdn_instance_map() ) self.prometheus_host_instance_map = ( self._build_prometheus_host_instance_map() ) def _setup_prometheus_client(self): """Initialise the prometheus client with config options Use the prometheus_client options in watcher.conf to setup the PrometheusAPIClient client object and return it. :raises watcher.common.exception.MissingParameter if prometheus host or port is not set in the watcher.conf under the [prometheus_client] section. :raises watcher.common.exception.InvalidParameter if the prometheus host or port has an invalid format. """ def _validate_host_port(host, port): if len(host) > 255: return (False, "hostname is too long: '%s'" % host) if host[-1] == '.': host = host[:-1] legal_hostname = re.compile( "(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE) if not all(legal_hostname.match(s) for s in host.split(".")): return (False, "hostname '%s' is invalid" % host) if not str(port).isdigit(): return (False, "port '%s' is invalid" % port) return (True, "") host = CONF.prometheus_client.host port = CONF.prometheus_client.port if not host or not port: raise exception.MissingParameter( message=(_("prometheus host and port must be set in " "watcher.conf under the [prometheus_client] " "section"))) validated, reason = _validate_host_port(host, port) if not validated: raise exception.InvalidParameter( message=(_("A valid prometheus host and port are " "required: %s") % reason)) return prometheus_client.PrometheusAPIClient( "%s:%s" % (host, port)) def _build_prometheus_fqdn_instance_map(self): """Build the fqdn <--> instance_label mapping needed for queries Watcher knows nodes by their hostname. In Prometheus however the scrape targets (also known as 'instances') are specified by I.P. (or hostname) and port number. This function creates a mapping between the fully qualified domain name of each node and the corresponding instance label used in the scrape config. This relies on a custom 'fqdn' label added to Prometheus scrape_configs. Operators can use a different custom label instead by setting the prometheus_fqdn_label config option under the prometheus_client section of watcher config.
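# A sketch of the mapping built above, run against a hand-written sample
# of the targets payload (the field names mirror the comment in the
# method body below; all values here are made up):
sample_targets = [
    {'labels': {'fqdn': 'node1.controlplane.domain',
                'instance': '10.1.2.3:9100', 'job': 'node'}},
    {'labels': {'instance': '10.1.2.4:9100', 'job': 'node'}},  # no fqdn label
]
fqdn_label = 'fqdn'
fqdn_instance_map = {
    target['labels'][fqdn_label]: target['labels'].get('instance')
    for target in sample_targets
    if target.get('labels', {}).get(fqdn_label)
}
# -> {'node1.controlplane.domain': '10.1.2.3:9100'}. The second target is
# skipped because it lacks the custom fqdn label, which is why an empty
# result is treated as an error below.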
The built prometheus_fqdn_instance_map is used to match watcher node.hostname if watcher stores fqdn and otherwise the host_instance_map is used instead. :return a dict mapping fqdn to instance label. For example: {'marios-env-again.controlplane.domain': '10.1.2.3:9100'} """ prometheus_targets = self.prometheus._get( "targets?state=active")['data']['activeTargets'] # >>> prometheus_targets[0]['labels'] # {'fqdn': 'marios-env-again.controlplane.domain', # 'instance': 'localhost:9100', 'job': 'node'} fqdn_instance_map = { fqdn: instance for (fqdn, instance) in ( (target['labels'].get(self.prometheus_fqdn_label), target['labels'].get('instance')) for target in prometheus_targets if target.get('labels', {}).get(self.prometheus_fqdn_label) ) } if not fqdn_instance_map: LOG.error( "Could not create fqdn instance map from Prometheus " "targets config. Prometheus returned the following: %s", prometheus_targets ) return {} return fqdn_instance_map def _build_prometheus_host_instance_map(self): """Build the hostname<-->instance_label mapping needed for queries The prometheus_fqdn_instance_map has the fully qualified domain name for hosts. This will create a duplicate map containing only the host name part. Depending on the watcher node.hostname either the fqdn_instance_map or the host_instance_map will be used to resolve the correct prometheus instance label for queries. In the event the fqdn_instance_map keys are not valid fqdn (for example it contains hostnames, not fqdn) the host_instance_map cannot be created and an empty dictionary is returned with a warning logged. :return a dict mapping hostname to instance label. For example: {'marios-env-again': 'localhost:9100'} """ if not self.prometheus_fqdn_instance_map: LOG.error("Cannot build host_instance_map without " "fqdn_instance_map") return {} host_instance_map = { host: instance for (host, instance) in ( (fqdn.split('.')[0], inst) for fqdn, inst in self.prometheus_fqdn_instance_map.items() if '.' in fqdn ) } if not host_instance_map: LOG.warning("Creating empty host instance map. Are the keys " "in prometheus_fqdn_instance_map valid fqdn?") return {} return host_instance_map def _resolve_prometheus_instance_label(self, node_name): """Resolve the prometheus instance label to use in queries Given the watcher node.hostname, resolve the prometheus instance label for use in queries, first trying the fqdn_instance_map and then the host_instance_map (watcher.node_name can be fqdn or hostname). If the name is not resolved after the first attempt, rebuild the fqdn and host instance maps and try again. This allows for new hosts added after the initialisation of the fqdn_instance_map. :param node_name: the watcher node.hostname :return String for the prometheus instance label and None if not found """ def _query_maps(node): return self.prometheus_fqdn_instance_map.get( node, self.prometheus_host_instance_map.get(node, None)) instance_label = _query_maps(node_name) # refresh the fqdn and host instance maps and retry if not instance_label: self.prometheus_fqdn_instance_map = ( self._build_prometheus_fqdn_instance_map() ) self.prometheus_host_instance_map = ( self._build_prometheus_host_instance_map() ) instance_label = _query_maps(node_name) if not instance_label: LOG.error("Cannot query prometheus without instance label. 
" "Could not resolve %s", node_name) return None return instance_label def _resolve_prometheus_aggregate(self, watcher_aggregate, meter): """Resolve the prometheus aggregate using self.AGGREGATES_MAP This uses the AGGREGATES_MAP to resolve the correct prometheus aggregate to use in queries, from the given watcher aggregate """ if watcher_aggregate == 'count': LOG.warning('Prometheus data source does not currently support ' ' the count aggregate. Proceeding with mean (avg).') promql_aggregate = self.AGGREGATES_MAP.get(watcher_aggregate) if not promql_aggregate: raise exception.InvalidParameter( message=(_("Unknown Watcher aggregate %s. This does not " "resolve to any valid prometheus query aggregate.") % watcher_aggregate) ) return promql_aggregate def _build_prometheus_query(self, aggregate, meter, instance_label, period, resource=None): """Build and return the prometheus query string with the given args This function builds and returns the string query that will be sent to the Prometheus server /query endpoint. For host cpu usage we use: 100 - (avg by (instance)(rate(node_cpu_seconds_total{mode='idle', instance='some_host'}[300s])) * 100) so using prometheus rate function over the specified period, we average per instance (all cpus) idle time and then 'everything else' is cpu usage time. For host memory usage we use: (node_memory_MemTotal_bytes{instance='the_host'} - avg_over_time( node_memory_MemAvailable_bytes{instance='the_host'}[300s])) / 1024 / 1024 So we take total and subtract available memory to determine how much is in use. We use the prometheus xxx_over_time functions avg/max/min depending on the aggregate with the specified time period. :param aggregate: one of the values of self.AGGREGATES_MAP :param meter: the name of the Prometheus meter to use :param instance_label: the Prometheus instance label (scrape target). :param period: the period in seconds for which to query :param resource: the resource object for which metrics are requested :return: a String containing the Prometheus query :raises watcher.common.exception.InvalidParameter if params are None :raises watcher.common.exception.InvalidParameter if meter is not known or currently supported (prometheus meter name). """ query_args = None uuid_label_key = CONF.prometheus_client.instance_uuid_label if (meter is None or aggregate is None or instance_label is None or period is None): raise exception.InvalidParameter( message=(_( "Cannot build prometheus query without args. " "You provided: meter %(mtr)s, aggregate %(agg)s, " "instance_label %(inst)s, period %(prd)s") % {'mtr': meter, 'agg': aggregate, 'inst': instance_label, 'prd': period}) ) if meter == 'node_cpu_seconds_total': query_args = ( "100 - (%s by (instance)(rate(%s" "{mode='idle',instance='%s'}[%ss])) * 100)" % (aggregate, meter, instance_label, period) ) elif meter == 'node_memory_MemAvailable_bytes': query_args = ( "(node_memory_MemTotal_bytes{instance='%s'} " "- %s_over_time(%s{instance='%s'}[%ss])) " "/ 1024 / 1024" % (instance_label, aggregate, meter, instance_label, period) ) elif meter == 'ceilometer_memory_usage': query_args = ( "%s_over_time(%s{%s='%s'}[%ss])" % (aggregate, meter, uuid_label_key, instance_label, period) ) elif meter == 'ceilometer_cpu': # We are converting the total cumulative cpu time (ns) to cpu usage # percentage so we need to divide between the number of vcpus. # As this is a percentage metric, we set a max level of 100. 
It has # been observed that in very high usage cases prometheus reports # values higher than 100, which can lead to unexpected behaviors. vcpus = resource.vcpus if not vcpus: LOG.warning( "instance vcpu count not set for instance %s, assuming 1", instance_label ) vcpus = 1 query_args = ( "clamp_max((%s by (instance)(rate(%s{%s='%s'}[%ss]))/10e+8) " "*(100/%s), 100)" % (aggregate, meter, uuid_label_key, instance_label, period, vcpus) ) else: raise exception.InvalidParameter( message=(_("Cannot process prometheus meter %s") % meter) ) return query_args def check_availability(self): """Check if the Prometheus server is available for queries Performs an HTTP GET on the prometheus API /status/runtimeinfo endpoint. The prometheus_client will raise a PrometheusAPIClientError if the call is unsuccessful, which is caught here and a warning logged. """ try: self.prometheus._get("status/runtimeinfo") except prometheus_client.PrometheusAPIClientError: LOG.warning( "check_availability raised PrometheusAPIClientError. " "Is Prometheus server down?" ) return 'not available' return 'available' def list_metrics(self): """Fetch all prometheus metrics from api/v1/label/__name__/values The prometheus_client will raise a PrometheusAPIClientError if the call is unsuccessful, which is caught here and a warning logged. """ try: response = self.prometheus._get("label/__name__/values") except prometheus_client.PrometheusAPIClientError: LOG.warning( "list_metrics raised PrometheusAPIClientError. Is Prometheus " "server down?" ) return set() return set(response['data']) def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): meter = self._get_meter(meter_name) query_args = '' instance_label = '' # For the instance resource type, the datasource expects the uuid of # the instance to be assigned to a label in the prometheus metrics, # with a specific key value. if resource_type == 'compute_node': instance_label = self._resolve_prometheus_instance_label( resource.hostname) elif resource_type == 'instance': instance_label = resource.uuid # For ram_allocated and root_disk size metrics there are no valid # values in the prometheus backend store. We rely on the values # provided in the VMs inventory. if meter == 'instance.memory': return float(resource.memory) elif meter == 'instance.disk': return float(resource.disk) else: LOG.warning( "Prometheus data source does not currently support " "resource_type %s", resource_type ) return None promql_aggregate = self._resolve_prometheus_aggregate(aggregate, meter) query_args = self._build_prometheus_query( promql_aggregate, meter, instance_label, period, resource ) if not query_args: LOG.error("Cannot proceed without valid prometheus query") return None result = self.query_retry( self.prometheus.query, query_args, ignored_exc=prometheus_client.PrometheusAPIClientError, ) return float(result[0].value) if result else None def statistic_series(self, resource=None, resource_type=None, meter_name=None, start_time=None, end_time=None, granularity=300): raise NotImplementedError( _('Prometheus helper currently does not support statistic_series. ' 'This can be considered for future enhancement.')) def _invert_max_min_aggregate(self, agg): """Invert max and min for node/host metric queries from node-exporter because we query for 'idle'/'unused' cpu and memory. For Watcher 'max cpu used' we query for prometheus 'min idle time'. For Watcher 'max memory used' we retrieve min 'unused'/'available' memory from Prometheus.
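# Worked examples of the query strings assembled by
# _build_prometheus_query above (the aggregate, instance label and
# period values are illustrative):
aggregate, instance, period = 'avg', '10.1.2.3:9100', 300
host_cpu_query = (
    "100 - (%s by (instance)(rate(node_cpu_seconds_total"
    "{mode='idle',instance='%s'}[%ss])) * 100)"
    % (aggregate, instance, period)
)
# -> "100 - (avg by (instance)(rate(node_cpu_seconds_total
#     {mode='idle',instance='10.1.2.3:9100'}[300s])) * 100)"
host_ram_query = (
    "(node_memory_MemTotal_bytes{instance='%s'} "
    "- %s_over_time(node_memory_MemAvailable_bytes{instance='%s'}[%ss])) "
    "/ 1024 / 1024"
    % (instance, aggregate, instance, period)
)
# -> total memory minus average available memory over the last 300s,
#    converted to MiB.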
This internal function is used exclusively by get_host_cpu_usage and get_host_ram_usage. :param agg: the metric collection aggregate :return: a String aggregate """ if agg == 'max': return 'min' elif agg == 'min': return 'max' return agg def get_host_cpu_usage(self, resource, period=300, aggregate="mean", granularity=None): """Query prometheus for node_cpu_seconds_total This calculates the host cpu usage and returns it as a percentage The calculation is made by using the cpu 'idle' time, per instance (so all CPUs are included). For example the query looks like (100 - (avg by (instance)(rate(node_cpu_seconds_total {mode='idle',instance='localhost:9100'}[300s])) * 100)) """ aggregate = self._invert_max_min_aggregate(aggregate) cpu_usage = self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period=period, granularity=granularity, aggregate=aggregate) return float(cpu_usage) if cpu_usage else None def get_host_ram_usage(self, resource, period=300, aggregate="mean", granularity=None): aggregate = self._invert_max_min_aggregate(aggregate) ram_usage = self.statistic_aggregation( resource, 'compute_node', 'host_ram_usage', period=period, granularity=granularity, aggregate=aggregate) return float(ram_usage) if ram_usage else None def get_instance_ram_usage(self, resource, period=300, aggregate="mean", granularity=None): ram_usage = self.statistic_aggregation( resource, 'instance', 'instance_ram_usage', period=period, granularity=granularity, aggregate=aggregate) return ram_usage def get_instance_cpu_usage(self, resource, period=300, aggregate="mean", granularity=None): cpu_usage = self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period=period, granularity=granularity, aggregate=aggregate) return cpu_usage def get_instance_ram_allocated(self, resource, period=300, aggregate="mean", granularity=None): ram_allocated = self.statistic_aggregation( resource, 'instance', 'instance_ram_allocated', period=period, granularity=granularity, aggregate=aggregate) return ram_allocated def get_instance_root_disk_size(self, resource, period=300, aggregate="mean", granularity=None): root_disk_size = self.statistic_aggregation( resource, 'instance', 'instance_root_disk_size', period=period, granularity=granularity, aggregate=aggregate) return root_disk_size ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/gmr.py0000664000175000017500000000274000000000000022576 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
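# The max/min inversion described above in miniature: node-exporter
# metrics measure idle/available capacity, so Watcher's 'max used'
# becomes Prometheus 'min idle' (and vice versa), while 'mean' passes
# through unchanged:
def invert_max_min(agg):
    return {'max': 'min', 'min': 'max'}.get(agg, agg)

assert invert_max_min('max') == 'min'
assert invert_max_min('min') == 'max'
assert invert_max_min('avg') == 'avg'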
from oslo_reports import guru_meditation_report as gmr from watcher._i18n import _ from watcher.decision_engine.model.collector import manager def register_gmr_plugins(): """Register GMR plugins that are specific to watcher-decision-engine.""" gmr.TextGuruMeditation.register_section(_('CDMCs'), show_models) def show_models(): """Create a formatted output of all the CDMs Mainly used as a Guru Meditation Report (GMR) plugin """ mgr = manager.CollectorManager() output = [] for name, cdmc in mgr.get_collectors().items(): output.append("") output.append("~" * len(name)) output.append(name) output.append("~" * len(name)) output.append("") cdmc_struct = cdmc.cluster_data_model.to_string() output.append(cdmc_struct) return "\n".join(output) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/goal/0000775000175000017500000000000000000000000022356 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/__init__.py0000664000175000017500000000224500000000000024472 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.goal import goals Dummy = goals.Dummy ServerConsolidation = goals.ServerConsolidation ThermalOptimization = goals.ThermalOptimization Unclassified = goals.Unclassified WorkloadBalancing = goals.WorkloadBalancing NoisyNeighborOptimization = goals.NoisyNeighborOptimization SavingEnergy = goals.SavingEnergy HardwareMaintenance = goals.HardwareMaintenance __all__ = ("Dummy", "ServerConsolidation", "ThermalOptimization", "Unclassified", "WorkloadBalancing", "NoisyNeighborOptimization", "SavingEnergy", "HardwareMaintenance") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/base.py0000664000175000017500000000373300000000000023650 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
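# An illustration of the section format produced by show_models() in
# gmr.py above; the collector names and model dumps are stand-ins:
collectors = {'compute': '(compute model dump)',
              'storage': '(storage model dump)'}
output = []
for name, model_string in collectors.items():
    output.extend(["", "~" * len(name), name, "~" * len(name),
                   "", model_string])
print("\n".join(output))
# ~~~~~~~
# compute
# ~~~~~~~
#
# (compute model dump)
# ... and similarly for 'storage'.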
import abc from watcher.common.loader import loadable class Goal(loadable.Loadable, metaclass=abc.ABCMeta): def __init__(self, config): super(Goal, self).__init__(config) self.name = self.get_name() self.display_name = self.get_display_name() self.efficacy_specification = self.get_efficacy_specification() @classmethod @abc.abstractmethod def get_name(cls): """Name of the goal: should be identical to the related entry point""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_display_name(cls): """The goal display name for the goal""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_translatable_display_name(cls): """The translatable msgid of the goal""" # Note(v-francoise): Defined here to be used as the translation key for # other services raise NotImplementedError() @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @abc.abstractmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/goal/efficacy/0000775000175000017500000000000000000000000024127 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/efficacy/__init__.py0000664000175000017500000000000000000000000026226 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/efficacy/base.py0000664000175000017500000000607700000000000025425 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An efficacy specification is a contract that is associated to each :ref:`Goal ` that defines the various :ref:`efficacy indicators ` a strategy achieving the associated goal should provide within its :ref:`solution `. Indeed, each solution proposed by a strategy will be validated against this contract before calculating its :ref:`global efficacy `. 
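# A minimal concrete Goal sketched against the abstract base above. The
# goal name is hypothetical, not one shipped with Watcher, and the
# sketch assumes the usual module imports (_ from watcher._i18n and
# specs from watcher.decision_engine.goal.efficacy):
class ExampleGoal(Goal):

    @classmethod
    def get_name(cls):
        # Must be identical to the related entry point name.
        return "example_goal"

    @classmethod
    def get_display_name(cls):
        return _("Example goal")

    @classmethod
    def get_translatable_display_name(cls):
        return "Example goal"

    @classmethod
    def get_efficacy_specification(cls):
        # No indicators defined for this sketch.
        return specs.Unclassified()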
""" import abc import jsonschema from oslo_serialization import jsonutils class EfficacySpecification(object, metaclass=abc.ABCMeta): def __init__(self): self._indicators_specs = self.get_indicators_specifications() @property def indicators_specs(self): return self._indicators_specs @abc.abstractmethod def get_indicators_specifications(self): """List the specifications of the indicator for this efficacy spec :return: Tuple of indicator specifications :rtype: Tuple of :py:class:`~.IndicatorSpecification` instances """ raise NotImplementedError() @abc.abstractmethod def get_global_efficacy_indicator(self, indicators_map): """Compute the global efficacy for the goal it achieves :param indicators_map: dict-like object containing the efficacy indicators related to this spec :type indicators_map: :py:class:`~.IndicatorsMap` instance :raises: NotImplementedError :returns: :py:class:`~.Indicator` instance list, each instance specify global efficacy for different openstack resource. """ raise NotImplementedError() @property def schema(self): """Combined schema from the schema of the indicators""" schema = { "type": "object", "properties": {}, "required": [] } for indicator in self.indicators_specs: schema["properties"][indicator.name] = indicator.schema if indicator.required: schema["required"].append(indicator.name) return schema def validate_efficacy_indicators(self, indicators_map): if indicators_map: jsonschema.validate(indicators_map, self.schema) else: True def get_indicators_specs_dicts(self): return [indicator.to_dict() for indicator in self.indicators_specs] def serialize_indicators_specs(self): return jsonutils.dumps(self.get_indicators_specs_dicts()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/efficacy/indicators.py0000664000175000017500000002007700000000000026646 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import jsonschema from jsonschema import SchemaError from jsonschema import ValidationError from oslo_log import log from oslo_serialization import jsonutils from watcher._i18n import _ from watcher.common import exception LOG = log.getLogger(__name__) class IndicatorSpecification(object, metaclass=abc.ABCMeta): def __init__(self, name=None, description=None, unit=None, required=True): self.name = name self.description = description self.unit = unit self.required = required @property @abc.abstractmethod def schema(self): """JsonSchema used to validate the indicator value :return: A Schema """ raise NotImplementedError() @classmethod def validate(cls, solution): """Validate the given solution :raises: :py:class:`~.InvalidIndicatorValue` when the validation fails """ indicator = cls() value = None try: value = getattr(solution, indicator.name) jsonschema.validate(value, cls.schema) except (SchemaError, ValidationError) as exc: LOG.exception(exc) raise except Exception as exc: LOG.exception(exc) raise exception.InvalidIndicatorValue( name=indicator.name, value=value, spec_type=type(indicator)) def to_dict(self): return { "name": self.name, "description": self.description, "unit": self.unit, "schema": jsonutils.dumps(self.schema) if self.schema else None, } def __str__(self): return str(self.to_dict()) class ComputeNodesCount(IndicatorSpecification): def __init__(self): super(ComputeNodesCount, self).__init__( name="compute_nodes_count", description=_("The total number of enabled compute nodes."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class ReleasedComputeNodesCount(IndicatorSpecification): def __init__(self): super(ReleasedComputeNodesCount, self).__init__( name="released_compute_nodes_count", description=_("The number of compute nodes to be released."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class InstancesCount(IndicatorSpecification): def __init__(self): super(InstancesCount, self).__init__( name="instances_count", description=_("The total number of audited instances in " "strategy."), unit=None, required=False, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class InstanceMigrationsCount(IndicatorSpecification): def __init__(self): super(InstanceMigrationsCount, self).__init__( name="instance_migrations_count", description=_("The number of VM migrations to be performed."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class LiveInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(LiveInstanceMigrateCount, self).__init__( name="live_migrate_instance_count", description=_("The number of instances actually live migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedLiveInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(PlannedLiveInstanceMigrateCount, self).__init__( name="planned_live_migrate_instance_count", description=_("The number of instances planned to live migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class ColdInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(ColdInstanceMigrateCount, self).__init__( name="cold_migrate_instance_count", description=_("The number of instances actually cold migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedColdInstanceMigrateCount(IndicatorSpecification): def 
__init__(self): super(PlannedColdInstanceMigrateCount, self).__init__( name="planned_cold_migrate_instance_count", description=_("The number of instances planned to cold migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class VolumeMigrateCount(IndicatorSpecification): def __init__(self): super(VolumeMigrateCount, self).__init__( name="volume_migrate_count", description=_("The number of detached volumes actually migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedVolumeMigrateCount(IndicatorSpecification): def __init__(self): super(PlannedVolumeMigrateCount, self).__init__( name="planned_volume_migrate_count", description=_("The number of detached volumes planned" " to migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class VolumeUpdateCount(IndicatorSpecification): def __init__(self): super(VolumeUpdateCount, self).__init__( name="volume_update_count", description=_("The number of attached volumes actually" " migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedVolumeUpdateCount(IndicatorSpecification): def __init__(self): super(PlannedVolumeUpdateCount, self).__init__( name="planned_volume_update_count", description=_("The number of attached volumes planned to" " migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class StandardDeviationValue(IndicatorSpecification): def __init__(self): super(StandardDeviationValue, self).__init__( name="standard_deviation_after_audit", description=_("The value of resulted standard deviation."), unit=None, required=False, ) @property def schema(self): return { "type": "number", "minimum": 0 } class OriginalStandardDeviationValue(IndicatorSpecification): def __init__(self): super(OriginalStandardDeviationValue, self).__init__( name="standard_deviation_before_audit", description=_("The value of original standard deviation."), unit=None, required=False, ) @property def schema(self): return { "type": "number", "minimum": 0 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/efficacy/specs.py0000664000175000017500000001360000000000000025616 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
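# What the count-style indicator schemas above enforce, checked directly
# with jsonschema:
import jsonschema

count_schema = {"type": "integer", "minimum": 0}
jsonschema.validate(4, count_schema)         # a valid migration count
try:
    jsonschema.validate(-1, count_schema)    # violates "minimum": 0
except jsonschema.ValidationError as exc:
    print(exc.message)   # "-1 is less than the minimum of 0"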
from watcher._i18n import _ from watcher.decision_engine.goal.efficacy import base from watcher.decision_engine.goal.efficacy import indicators from watcher.decision_engine.solution import efficacy class Unclassified(base.EfficacySpecification): def get_indicators_specifications(self): return () def get_global_efficacy_indicator(self, indicators_map): return None class ServerConsolidation(base.EfficacySpecification): def get_indicators_specifications(self): return [ indicators.ComputeNodesCount(), indicators.ReleasedComputeNodesCount(), indicators.InstanceMigrationsCount(), ] def get_global_efficacy_indicator(self, indicators_map=None): value = 0 global_efficacy = [] if indicators_map and indicators_map.compute_nodes_count > 0: value = (float(indicators_map.released_compute_nodes_count) / float(indicators_map.compute_nodes_count)) * 100 global_efficacy.append(efficacy.Indicator( name="released_nodes_ratio", description=_("Ratio of released compute nodes divided by the " "total number of enabled compute nodes."), unit='%', value=value, )) return global_efficacy class WorkloadBalancing(base.EfficacySpecification): def get_indicators_specifications(self): return [ indicators.InstanceMigrationsCount(), indicators.InstancesCount(), indicators.StandardDeviationValue(), indicators.OriginalStandardDeviationValue() ] def get_global_efficacy_indicator(self, indicators_map=None): gl_indicators = [] mig_value = 0 if indicators_map and indicators_map.instance_migrations_count > 0: mig_value = ( indicators_map.instance_migrations_count / float(indicators_map.instances_count) * 100) gl_indicators.append(efficacy.Indicator( name="live_migrations_count", description=_("Ratio of migrated virtual machines to audited " "virtual machines"), unit='%', value=mig_value)) return gl_indicators class HardwareMaintenance(base.EfficacySpecification): def get_indicators_specifications(self): return [ indicators.LiveInstanceMigrateCount(), indicators.PlannedLiveInstanceMigrateCount(), indicators.ColdInstanceMigrateCount(), indicators.PlannedColdInstanceMigrateCount(), indicators.VolumeMigrateCount(), indicators.PlannedVolumeMigrateCount(), indicators.VolumeUpdateCount(), indicators.PlannedVolumeUpdateCount() ] def get_global_efficacy_indicator(self, indicators_map=None): li_value = 0 if (indicators_map and indicators_map.planned_live_migrate_instance_count > 0): li_value = ( float(indicators_map.planned_live_migrate_instance_count) / float(indicators_map.live_migrate_instance_count) * 100 ) li_indicator = efficacy.Indicator( name="live_instance_migrate_ratio", description=_("Ratio of actual live migrated instances " "to planned live migrate instances."), unit='%', value=li_value) ci_value = 0 if (indicators_map and indicators_map.planned_cold_migrate_instance_count > 0): ci_value = ( float(indicators_map.planned_cold_migrate_instance_count) / float(indicators_map.cold_migrate_instance_count) * 100 ) ci_indicator = efficacy.Indicator( name="cold_instance_migrate_ratio", description=_("Ratio of actual cold migrated instances " "to planned cold migrate instances."), unit='%', value=ci_value) dv_value = 0 if (indicators_map and indicators_map.planned_volume_migrate_count > 0): dv_value = (float(indicators_map.planned_volume_migrate_count) / float(indicators_map. 
volume_migrate_count) * 100) dv_indicator = efficacy.Indicator( name="volume_migrate_ratio", description=_("Ratio of actual detached volumes migrated to" " planned detached volumes migrate."), unit='%', value=dv_value) av_value = 0 if (indicators_map and indicators_map.planned_volume_update_count > 0): av_value = (float(indicators_map.planned_volume_update_count) / float(indicators_map. volume_update_count) * 100) av_indicator = efficacy.Indicator( name="volume_update_ratio", description=_("Ratio of actual attached volumes migrated to" " planned attached volumes migrate."), unit='%', value=av_value) return [li_indicator, ci_indicator, dv_indicator, av_indicator] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/goal/goals.py0000664000175000017500000001520500000000000024040 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher._i18n import _ from watcher.decision_engine.goal import base from watcher.decision_engine.goal.efficacy import specs class Dummy(base.Goal): """Dummy Reserved goal that is used for testing purposes. """ @classmethod def get_name(cls): return "dummy" @classmethod def get_display_name(cls): return _("Dummy goal") @classmethod def get_translatable_display_name(cls): return "Dummy goal" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class Unclassified(base.Goal): """Unclassified This goal is used to ease the development process of a strategy. Containing no actual indicator specification, this goal can be used whenever a strategy has yet to be formally associated with an existing goal. If the goal achieve has been identified but there is no available implementation, this Goal can also be used as a transitional stage. """ @classmethod def get_name(cls): return "unclassified" @classmethod def get_display_name(cls): return _("Unclassified") @classmethod def get_translatable_display_name(cls): return "Unclassified" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class ServerConsolidation(base.Goal): """ServerConsolidation This goal is for efficient usage of compute server resources in order to reduce the total number of servers. """ @classmethod def get_name(cls): return "server_consolidation" @classmethod def get_display_name(cls): return _("Server Consolidation") @classmethod def get_translatable_display_name(cls): return "Server Consolidation" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.ServerConsolidation() class ThermalOptimization(base.Goal): """ThermalOptimization This goal is used to balance the temperature across different servers. 
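# The released_nodes_ratio computation in ServerConsolidation above,
# worked on sample numbers:
compute_nodes_count = 10
released_compute_nodes_count = 3
value = (float(released_compute_nodes_count) /
         float(compute_nodes_count)) * 100
# value == 30.0, i.e. the plan would release 30% of the enabled
# compute nodes.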
""" @classmethod def get_name(cls): return "thermal_optimization" @classmethod def get_display_name(cls): return _("Thermal Optimization") @classmethod def get_translatable_display_name(cls): return "Thermal Optimization" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class WorkloadBalancing(base.Goal): """WorkloadBalancing This goal is used to evenly distribute workloads across different servers. """ @classmethod def get_name(cls): return "workload_balancing" @classmethod def get_display_name(cls): return _("Workload Balancing") @classmethod def get_translatable_display_name(cls): return "Workload Balancing" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.WorkloadBalancing() class AirflowOptimization(base.Goal): """AirflowOptimization This goal is used to optimize the airflow within a cloud infrastructure. """ @classmethod def get_name(cls): return "airflow_optimization" @classmethod def get_display_name(cls): return _("Airflow Optimization") @classmethod def get_translatable_display_name(cls): return "Airflow Optimization" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class NoisyNeighborOptimization(base.Goal): """NoisyNeighborOptimization This goal is used to identify and migrate a Noisy Neighbor - a low priority VM that negatively affects performance of a high priority VM in terms of IPC by over utilizing Last Level Cache. """ @classmethod def get_name(cls): return "noisy_neighbor" @classmethod def get_display_name(cls): return _("Noisy Neighbor") @classmethod def get_translatable_display_name(cls): return "Noisy Neighbor" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class SavingEnergy(base.Goal): """SavingEnergy This goal is used to reduce power consumption within a data center. """ @classmethod def get_name(cls): return "saving_energy" @classmethod def get_display_name(cls): return _("Saving Energy") @classmethod def get_translatable_display_name(cls): return "Saving Energy" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class HardwareMaintenance(base.Goal): """HardwareMaintenance This goal is to migrate instances and volumes on a set of compute nodes and storage from nodes under maintenance """ @classmethod def get_name(cls): return "hardware_maintenance" @classmethod def get_display_name(cls): return _("Hardware Maintenance") @classmethod def get_translatable_display_name(cls): return "Hardware Maintenance" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.HardwareMaintenance() class ClusterMaintaining(base.Goal): """ClusterMaintenance This goal is used to maintain compute nodes without having the user's application being interrupted. 
""" @classmethod def get_name(cls): return "cluster_maintaining" @classmethod def get_display_name(cls): return _("Cluster Maintaining") @classmethod def get_translatable_display_name(cls): return "Cluster Maintaining" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/loading/0000775000175000017500000000000000000000000023051 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/loading/__init__.py0000664000175000017500000000000000000000000025150 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/loading/default.py0000664000175000017500000000361000000000000025047 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from watcher.common.loader import default class DefaultStrategyLoader(default.DefaultLoader): def __init__(self): super(DefaultStrategyLoader, self).__init__( namespace='watcher_strategies') class DefaultGoalLoader(default.DefaultLoader): def __init__(self): super(DefaultGoalLoader, self).__init__( namespace='watcher_goals') class DefaultPlannerLoader(default.DefaultLoader): def __init__(self): super(DefaultPlannerLoader, self).__init__( namespace='watcher_planners') class ClusterDataModelCollectorLoader(default.DefaultLoader): def __init__(self): super(ClusterDataModelCollectorLoader, self).__init__( namespace='watcher_cluster_data_model_collectors') class DefaultScoringLoader(default.DefaultLoader): def __init__(self): super(DefaultScoringLoader, self).__init__( namespace='watcher_scoring_engines') class DefaultScoringContainerLoader(default.DefaultLoader): def __init__(self): super(DefaultScoringContainerLoader, self).__init__( namespace='watcher_scoring_engine_containers') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/manager.py0000664000175000017500000000552500000000000023427 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ This component is responsible for computing a set of potential optimization :ref:`Actions ` in order to fulfill the :ref:`Goal ` of an :ref:`Audit `. It first reads the parameters of the :ref:`Audit ` from the associated :ref:`Audit Template ` and knows the :ref:`Goal ` to achieve. It then selects the most appropriate :ref:`Strategy ` depending on how Watcher was configured for this :ref:`Goal `. The :ref:`Strategy ` is then executed and generates a set of :ref:`Actions ` which are scheduled in time by the :ref:`Watcher Planner ` (i.e., it generates an :ref:`Action Plan `). See :doc:`../architecture` for more details on this component. """ from watcher.common import service_manager from watcher import conf from watcher.decision_engine.messaging import audit_endpoint from watcher.decision_engine.messaging import data_model_endpoint from watcher.decision_engine.model.collector import manager from watcher.decision_engine.strategy.strategies import base \ as strategy_endpoint CONF = conf.CONF class DecisionEngineManager(service_manager.ServiceManager): @property def service_name(self): return 'watcher-decision-engine' @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_decision_engine.publisher_id @property def conductor_topic(self): return CONF.watcher_decision_engine.conductor_topic @property def notification_topics(self): return CONF.watcher_decision_engine.notification_topics @property def conductor_endpoints(self): return [audit_endpoint.AuditEndpoint, strategy_endpoint.StrategyEndpoint, data_model_endpoint.DataModelEndpoint] @property def notification_endpoints(self): return self.collector_manager.get_notification_endpoints() @property def collector_manager(self): return manager.CollectorManager() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/messaging/0000775000175000017500000000000000000000000023411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/messaging/__init__.py0000664000175000017500000000000000000000000025510 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/messaging/audit_endpoint.py0000664000175000017500000000414000000000000026770 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import futurist from oslo_config import cfg from oslo_log import log from watcher.decision_engine.audit import continuous as c_handler from watcher.decision_engine.audit import event as e_handler from watcher.decision_engine.audit import oneshot as o_handler from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) class AuditEndpoint(object): def __init__(self, messaging): self._messaging = messaging self._executor = futurist.GreenThreadPoolExecutor( max_workers=CONF.watcher_decision_engine.max_audit_workers) self._oneshot_handler = o_handler.OneShotAuditHandler() self._continuous_handler = c_handler.ContinuousAuditHandler().start() self._event_handler = e_handler.EventAuditHandler() @property def executor(self): return self._executor def do_trigger_audit(self, context, audit_uuid): audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True) if audit.audit_type == objects.audit.AuditType.ONESHOT.value: self._oneshot_handler.execute(audit, context) if audit.audit_type == objects.audit.AuditType.EVENT.value: self._event_handler.execute(audit, context) def trigger_audit(self, context, audit_uuid): LOG.debug("Trigger audit %s", audit_uuid) self.executor.submit(self.do_trigger_audit, context, audit_uuid) return audit_uuid ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/messaging/data_model_endpoint.py0000664000175000017500000000420400000000000027754 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2019 ZTE corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
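# The submission pattern used by AuditEndpoint above, in isolation: a
# futurist green-thread pool (requires eventlet) accepts work and
# returns a future immediately (the worker body is a stand-in):
import futurist

executor = futurist.GreenThreadPoolExecutor(max_workers=2)

def do_trigger_audit(audit_uuid):
    return "triggered %s" % audit_uuid

future = executor.submit(do_trigger_audit, "some-audit-uuid")
print(future.result())  # blocks until the worker finishes
executor.shutdown()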
# from watcher.common import exception from watcher.common import utils from watcher.decision_engine.model.collector import manager from watcher import objects class DataModelEndpoint(object): def __init__(self, messaging): self._messaging = messaging def get_audit_scope(self, context, audit=None): scope = None try: if utils.is_uuid_like(audit) or utils.is_int_like(audit): audit = objects.Audit.get( context, audit) else: audit = objects.Audit.get_by_name( context, audit) except exception.AuditNotFound: raise exception.InvalidIdentity(identity=audit) if audit: scope = audit.scope else: scope = [] return scope def get_data_model_info(self, context, data_model_type='compute', audit=None): if audit is not None: scope = self.get_audit_scope(context, audit) else: scope = [] collector_manager = manager.CollectorManager() collector = collector_manager.get_cluster_model_collector( data_model_type) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=scope) available_data_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not available_data_model: return {"context": []} return {"context": available_data_model.to_list()} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6311352 python_watcher-14.0.0/watcher/decision_engine/model/0000775000175000017500000000000000000000000022534 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/__init__.py0000664000175000017500000000000000000000000024633 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/base.py0000664000175000017500000000215300000000000024021 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ This component is in charge of executing the :ref:`Action Plan ` built by the :ref:`Watcher Decision Engine `. See: :doc:`../architecture` for more details on this component. 
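# The identifier dispatch in get_audit_scope above, in isolation:
# UUID-like or integer-like references go through Audit.get, anything
# else is treated as a name (uuidutils from oslo_utils backs the
# watcher.common.utils helpers used there):
from oslo_utils import uuidutils

def pick_lookup(audit_ref):
    if uuidutils.is_uuid_like(audit_ref) or audit_ref.isdigit():
        return "Audit.get"          # lookup by UUID or numeric id
    return "Audit.get_by_name"      # otherwise resolve by name

print(pick_lookup("9c1e58a7-55ee-4bf6-b6e1-83c4fbeaa66d"))  # Audit.get
print(pick_lookup("my-nightly-audit"))                      # Audit.get_by_name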
""" import abc class Model(object, metaclass=abc.ABCMeta): @abc.abstractmethod def to_string(self): raise NotImplementedError() @abc.abstractmethod def to_xml(self): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/model/collector/0000775000175000017500000000000000000000000024522 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/collector/__init__.py0000664000175000017500000000000000000000000026621 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/collector/base.py0000664000175000017500000002255300000000000026015 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A :ref:`Cluster Data Model ` (or CDM) is a logical representation of the current state and topology of the :ref:`Cluster ` :ref:`Managed resources `. It is represented as a set of :ref:`Managed resources ` (which may be a simple tree or a flat list of key-value pairs) which enables Watcher :ref:`Strategies ` to know the current relationships between the different :ref:`resources ` of the :ref:`Cluster ` during an :ref:`Audit ` and enables the :ref:`Strategy ` to request information such as: - What compute nodes are in a given :ref:`Audit Scope `? - What :ref:`Instances ` are hosted on a given compute node? - What is the current load of a compute node? - What is the current free memory of a compute node? - What is the network link between two compute nodes? - What is the available bandwidth on a given network link? - What is the current space available on a given virtual disk of a given :ref:`Instance ` ? - What is the current state of a given :ref:`Instance `? - ... In a word, this data model enables the :ref:`Strategy ` to know: - the current topology of the :ref:`Cluster ` - the current capacity for each :ref:`Managed resource ` - the current amount of used/free space for each :ref:`Managed resource ` - the current state of each :ref:`Managed resources ` In the Watcher project, we aim at providing a some generic and basic :ref:`Cluster Data Model ` for each :ref:`Goal `, usable in the associated :ref:`Strategies ` through a plugin-based mechanism which are called cluster data model collectors (or CDMCs). These CDMCs are responsible for loading and keeping up-to-date their associated CDM by listening to events and also periodically rebuilding themselves from the ground up. They are also directly accessible from the strategies classes. 
These CDMs are used to: - simplify the development of a new :ref:`Strategy ` for a given :ref:`Goal ` when there already are some existing :ref:`Strategies ` associated to the same :ref:`Goal ` - avoid duplicating the same code in several :ref:`Strategies ` associated to the same :ref:`Goal ` - have a better consistency between the different :ref:`Strategies ` for a given :ref:`Goal ` - avoid any strong coupling with any external :ref:`Cluster Data Model ` (the proposed data model acts as a pivot data model) There may be various :ref:`generic and basic Cluster Data Models ` proposed in Watcher helpers, each of them being adapted to achieving a given :ref:`Goal `: - For example, for a :ref:`Goal ` which aims at optimizing the network :ref:`resources ` the :ref:`Strategy ` may need to know which :ref:`resources ` are communicating together. - Whereas for a :ref:`Goal ` which aims at optimizing thermal and power conditions, the :ref:`Strategy ` may need to know the location of each compute node in the racks and the location of each rack in the room. Note however that a developer can use his/her own :ref:`Cluster Data Model ` if the proposed data model does not fit his/her needs as long as the :ref:`Strategy ` is able to produce a :ref:`Solution ` for the requested :ref:`Goal `. For example, a developer could rely on the Nova Data Model to optimize some compute resources. The :ref:`Cluster Data Model ` may be persisted in any appropriate storage system (SQL database, NoSQL database, JSON file, XML File, In Memory Database, ...). As of now, an in-memory model is built and maintained in the background in order to accelerate the execution of strategies. """ import abc import copy import threading import time from oslo_config import cfg from oslo_log import log from watcher.common import clients from watcher.common.loader import loadable from watcher.decision_engine.model import model_root LOG = log.getLogger(__name__) CONF = cfg.CONF class BaseClusterDataModelCollector(loadable.LoadableSingleton, metaclass=abc.ABCMeta): STALE_MODEL = model_root.ModelRoot(stale=True) def __init__(self, config, osc=None): super(BaseClusterDataModelCollector, self).__init__(config) self.osc = osc if osc else clients.OpenStackClients() self.lock = threading.RLock() self._audit_scope_handler = None self._cluster_data_model = None self._data_model_scope = None @property def cluster_data_model(self): if self._cluster_data_model is None: self.lock.acquire() self._cluster_data_model = self.execute() self.lock.release() return self._cluster_data_model @cluster_data_model.setter def cluster_data_model(self, model): self.lock.acquire() self._cluster_data_model = model self.lock.release() @property @abc.abstractmethod def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ raise NotImplementedError() def set_cluster_data_model_as_stale(self): self.cluster_data_model = self.STALE_MODEL @abc.abstractmethod def get_audit_scope_handler(self, audit_scope): """Get audit scope handler""" raise NotImplementedError() @abc.abstractmethod def execute(self): """Build a cluster data model""" raise NotImplementedError() @classmethod def get_config_opts(cls): return [ cfg.IntOpt( 'period', default=3600, help='The time interval (in seconds) between each ' 'synchronization of the model'), ] def get_latest_cluster_data_model(self): LOG.debug("Creating copy") LOG.debug(self.cluster_data_model.to_xml()) return 
copy.deepcopy(self.cluster_data_model) def synchronize(self): """Synchronize the cluster data model Whenever called, this synchronization will perform a drop-in replacement of the existing cluster data model """ self.cluster_data_model = self.execute() class BaseModelBuilder(object): def call_retry(self, f, *args, **kwargs): """Attempt to call an external service Attempts to access data from the external service and handles exceptions. The retrieval is retried in accordance with the value of api_call_retries :param f: The method that performs the actual querying for metrics :param args: Positional arguments supplied to the method :param kwargs: Keyword arguments supplied to the method :return: The value as retrieved from the external service """ num_retries = CONF.collector.api_call_retries timeout = CONF.collector.api_query_timeout last_exc = None for i in range(num_retries): try: return f(*args, **kwargs) except Exception as e: LOG.exception(e) last_exc = e self.call_retry_reset(e) LOG.warning("Retry %d of %d, error while calling service, " "retrying in %s seconds", i+1, num_retries, timeout) time.sleep(timeout) # all retries failed: re-raise the last encountered error raise last_exc @abc.abstractmethod def call_retry_reset(self, exc): """Attempt to recover after encountering an error Recover from errors while calling external services; the exception can be used to make a better decision on how to best recover. """ pass @abc.abstractmethod def execute(self, model_scope): """Build the cluster data model limited to the scope and return it Builds the cluster data model with respect to the supplied scope. The schema of this scope will depend on the type of ModelBuilder. """ raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/collector/cinder.py0000664000175000017500000002664400000000000026344 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.decision_engine.model.notification import cinder from watcher.decision_engine.scope import storage as storage_scope LOG = log.getLogger(__name__) class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector): """Cinder cluster data model collector The Cinder cluster data model collector creates an in-memory representation of the resources exposed by the storage service. 
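For illustration, an audit scope accepted by this collector is shaped after the ``SCHEMA`` below; all values here are placeholders::

    [{"availability_zones": [{"name": "nova"}]},
     {"volume_types": [{"name": "lvmdriver-1"}]},
     {"exclude": [
         {"storage_pools": [{"name": "<host@backend#pool>"}]},
         {"volumes": [{"uuid": "<volume-uuid>"}]}
     ]}]
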
""" SCHEMA = { "$schema": "http://json-schema.org/draft-04/schema#", "type": "array", "items": { "type": "object", "properties": { "availability_zones": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "volume_types": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "exclude": { "type": "array", "items": { "type": "object", "properties": { "storage_pools": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "volumes": { "type": "array", "items": { "type": "object", "properties": { "uuid": { "type": "string" } }, "additionalProperties": False } }, "projects": { "type": "array", "items": { "type": "object", "properties": { "uuid": { "type": "string" } }, "additionalProperties": False } }, "additionalProperties": False } } } }, "additionalProperties": False } } def __init__(self, config, osc=None): super(CinderClusterDataModelCollector, self).__init__(config, osc) @property def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ return [ cinder.CapacityNotificationEndpoint(self), cinder.VolumeCreateEnd(self), cinder.VolumeDeleteEnd(self), cinder.VolumeUpdateEnd(self), cinder.VolumeAttachEnd(self), cinder.VolumeDetachEnd(self), cinder.VolumeResizeEnd(self) ] def get_audit_scope_handler(self, audit_scope): self._audit_scope_handler = storage_scope.StorageScope( audit_scope, self.config) if self._data_model_scope is None or ( len(self._data_model_scope) > 0 and ( self._data_model_scope != audit_scope)): self._data_model_scope = audit_scope self._cluster_data_model = None LOG.debug("audit scope %s", audit_scope) return self._audit_scope_handler def execute(self): """Build the storage cluster data model""" LOG.debug("Building latest Cinder cluster data model") if self._audit_scope_handler is None: LOG.debug("No audit, Don't Build storage data model") return if self._data_model_scope is None: LOG.debug("No audit scope, Don't Build storage data model") return builder = CinderModelBuilder(self.osc) return builder.execute(self._data_model_scope) class CinderModelBuilder(base.BaseModelBuilder): """Build the graph-based model This model builder adds the following data" - Storage-related knowledge (Cinder) """ def __init__(self, osc): self.osc = osc self.model = model_root.StorageModelRoot() self.cinder = osc.cinder() self.cinder_helper = cinder_helper.CinderHelper(osc=self.osc) def _add_physical_layer(self): """Add the physical layer of the graph. This includes components which represent actual infrastructure hardware. """ for snode in self.call_retry( self.cinder_helper.get_storage_node_list): self.add_storage_node(snode) for pool in self.call_retry(self.cinder_helper.get_storage_pool_list): pool = self._build_storage_pool(pool) self.model.add_pool(pool) storage_name = getattr(pool, 'name') try: storage_node = self.model.get_node_by_name( storage_name) # Connect the instance to its compute node self.model.map_pool(pool, storage_node) except exception.StorageNodeNotFound: continue def add_storage_node(self, node): # Build and add base node. 
storage_node = self.build_storage_node(node) self.model.add_node(storage_node) def add_storage_pool(self, pool): storage_pool = self._build_storage_pool(pool) self.model.add_pool(storage_pool) def build_storage_node(self, node): """Build a storage node from a Cinder storage node :param node: A storage node :type node: :py:class:`~cinderclient.v3.services.Service` """ # node.host is formatted as host@backendname since Ocata, # or may be only host before Ocata backend = "" try: backend = node.host.split('@')[1] except IndexError: pass volume_type = self.call_retry( self.cinder_helper.get_volume_type_by_backendname, backend) # build up the storage node. node_attributes = { "host": node.host, "zone": node.zone, "state": node.state, "status": node.status, "volume_type": volume_type} storage_node = element.StorageNode(**node_attributes) return storage_node def _build_storage_pool(self, pool): """Build a storage pool from a Cinder storage pool :param pool: A storage pool :type pool: :py:class:`~cinderclient.v3.pools.Pool` :raises: exception.InvalidPoolAttributeValue """ # build up the storage pool. attrs = ["total_volumes", "total_capacity_gb", "free_capacity_gb", "provisioned_capacity_gb", "allocated_capacity_gb"] node_attributes = {"name": pool.name} for attr in attrs: try: node_attributes[attr] = int(getattr(pool, attr)) except AttributeError: LOG.debug("Attribute %s for pool %s is not provided", attr, pool.name) except ValueError: raise exception.InvalidPoolAttributeValue( name=pool.name, attribute=attr) storage_pool = element.Pool(**node_attributes) return storage_pool def _add_virtual_layer(self): """Add the virtual layer to the graph. This layer contains the virtual components of the infrastructure. """ self._add_virtual_storage() def _add_virtual_storage(self): volumes = self.call_retry(self.cinder_helper.get_volume_list) for vol in volumes: volume = self._build_volume_node(vol) self.model.add_volume(volume) pool_name = getattr(vol, 'os-vol-host-attr:host') if pool_name is None: # The volume is not attached to any pool continue try: pool = self.model.get_pool_by_pool_name( pool_name) self.model.map_volume(volume, pool) except exception.PoolNotFound: continue def _build_volume_node(self, volume): """Build a volume node Create a volume node for the graph using the `volume` Cinder object. :param volume: Cinder Volume object. :return: A volume node for the graph. """ attachments = [{k: v for k, v in iter(d.items()) if k in ( 'server_id', 'attachment_id')} for d in volume.attachments] volume_attributes = { "uuid": volume.id, "size": volume.size, "status": volume.status, "attachments": attachments, "name": volume.name or "", "multiattach": volume.multiattach, "snapshot_id": volume.snapshot_id or "", "project_id": getattr(volume, 'os-vol-tenant-attr:tenant_id'), "metadata": volume.metadata, "bootable": volume.bootable} return element.Volume(**volume_attributes) def execute(self, model_scope): """Instantiates the graph with the openstack cluster data. The graph is populated along 2 layers: virtual and physical. As each new layer is built, connections are made back to previous layers. 
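A hedged usage sketch (``osc`` is an ``OpenStackClients`` instance; the pool name is a placeholder)::

    builder = CinderModelBuilder(osc)
    model = builder.execute(model_scope=[])
    node = model.get_node_by_pool_name('<host@backend#pool>')
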
""" # TODO(Dantali0n): Use scope to limit size of model self._add_physical_layer() self._add_virtual_layer() return self.model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/collector/ironic.py0000664000175000017500000000776000000000000026371 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors:Yumeng Bao # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher.common import ironic_helper from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.decision_engine.scope import baremetal as baremetal_scope LOG = log.getLogger(__name__) class BaremetalClusterDataModelCollector(base.BaseClusterDataModelCollector): """Baremetal cluster data model collector The Baremetal cluster data model collector creates an in-memory representation of the resources exposed by the baremetal service. """ def __init__(self, config, osc=None): super(BaremetalClusterDataModelCollector, self).__init__(config, osc) @property def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ return None def get_audit_scope_handler(self, audit_scope): self._audit_scope_handler = baremetal_scope.BaremetalScope( audit_scope, self.config) if self._data_model_scope is None or ( len(self._data_model_scope) > 0 and ( self._data_model_scope != audit_scope)): self._data_model_scope = audit_scope self._cluster_data_model = None LOG.debug("audit scope %s", audit_scope) return self._audit_scope_handler def execute(self): """Build the baremetal cluster data model""" LOG.debug("Building latest Baremetal cluster data model") if self._audit_scope_handler is None: LOG.debug("No audit, Don't Build Baremetal data model") return if self._data_model_scope is None: LOG.debug("No audit scope, Don't Build Baremetal data model") return builder = BareMetalModelBuilder(self.osc) return builder.execute(self._data_model_scope) class BareMetalModelBuilder(base.BaseModelBuilder): """Build the graph-based model This model builder adds the following data" - Baremetal-related knowledge (Ironic) """ def __init__(self, osc): self.osc = osc self.model = model_root.BaremetalModelRoot() # TODO(lpetrut): add MAAS support self.ironic_helper = ironic_helper.IronicHelper(osc=self.osc) def add_ironic_node(self, node): # Build and add base node. ironic_node = self.build_ironic_node(node) self.model.add_node(ironic_node) def build_ironic_node(self, node): """Build a Baremetal node from a Ironic node :param node: A ironic node :type node: :py:class:`~ironicclient.v1.node.Node` """ # build up the ironic node. 
node_attributes = { "uuid": node.uuid, "power_state": node.power_state, "maintenance": node.maintenance, "maintenance_reason": node.maintenance_reason, "extra": {"compute_node_id": node.extra.compute_node_id} } ironic_node = element.IronicNode(**node_attributes) return ironic_node def execute(self, model_scope): # TODO(Dantali0n): Use scope to limit size of model for node in self.call_retry(self.ironic_helper.get_ironic_node_list): self.add_ironic_node(node) return self.model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/collector/manager.py0000664000175000017500000000442700000000000026515 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.common import utils from watcher.decision_engine.loading import default class CollectorManager(object): def __init__(self): self.collector_loader = default.ClusterDataModelCollectorLoader() self._collectors = None self._notification_endpoints = None def get_collectors(self): if self._collectors is None: collectors = utils.Struct() collector_plugins = cfg.CONF.collector.collector_plugins for collector_name in collector_plugins: collector = self.collector_loader.load(collector_name) collectors[collector_name] = collector self._collectors = collectors return self._collectors def get_notification_endpoints(self): if self._notification_endpoints is None: endpoints = [] for collector in self.get_collectors().values(): endpoints.extend(collector.notification_endpoints) self._notification_endpoints = endpoints return self._notification_endpoints def get_cluster_model_collector(self, name, osc=None): """Retrieve cluster data model collector :param name: name of the cluster data model collector plugin :type name: str :param osc: an OpenStackClients instance :type osc: :py:class:`~.OpenStackClients` instance :returns: cluster data model collector plugin :rtype: :py:class:`~.BaseClusterDataModelCollector` """ return self.collector_loader.load( name, osc=osc) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/collector/nova.py0000664000175000017500000005213000000000000026040 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Intel Innovation and Research Ireland Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os_resource_classes as orc from oslo_log import log from futurist import waiters from watcher.common import nova_helper from watcher.common import placement_helper from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.decision_engine.model.notification import nova from watcher.decision_engine.scope import compute as compute_scope from watcher.decision_engine import threading LOG = log.getLogger(__name__) class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector): """Nova cluster data model collector The Nova cluster data model collector creates an in-memory representation of the resources exposed by the compute service. """ HOST_AGGREGATES = "#/items/properties/compute/host_aggregates/" SCHEMA = { "$schema": "http://json-schema.org/draft-04/schema#", "type": "array", "items": { "type": "object", "properties": { "host_aggregates": { "type": "array", "items": { "anyOf": [ {"$ref": HOST_AGGREGATES + "host_aggr_id"}, {"$ref": HOST_AGGREGATES + "name"}, ] } }, "availability_zones": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "exclude": { "type": "array", "items": { "type": "object", "properties": { "instances": { "type": "array", "items": { "type": "object", "properties": { "uuid": { "type": "string" } }, "additionalProperties": False } }, "compute_nodes": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "host_aggregates": { "type": "array", "items": { "anyOf": [ {"$ref": HOST_AGGREGATES + "host_aggr_id"}, {"$ref": HOST_AGGREGATES + "name"}, ] } }, "instance_metadata": { "type": "array", "items": { "type": "object" } }, "projects": { "type": "array", "items": { "type": "object", "properties": { "uuid": { "type": "string" } }, "additionalProperties": False } } }, "additionalProperties": False } } }, "additionalProperties": False }, "host_aggregates": { "host_aggr_id": { "properties": { "id": { "oneOf": [ {"type": "integer"}, {"enum": ["*"]} ] } }, "additionalProperties": False }, "name": { "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "additionalProperties": False } def __init__(self, config, osc=None): super(NovaClusterDataModelCollector, self).__init__(config, osc) @property def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ return [ nova.VersionedNotification(self), ] def get_audit_scope_handler(self, audit_scope): self._audit_scope_handler = compute_scope.ComputeScope( audit_scope, self.config) if self._data_model_scope is None or ( len(self._data_model_scope) > 0 and ( self._data_model_scope != audit_scope)): self._data_model_scope = audit_scope self._cluster_data_model = None LOG.debug("audit scope %s", audit_scope) return self._audit_scope_handler def execute(self): """Build the compute cluster data model""" LOG.debug("Building latest Nova cluster data model") if self._audit_scope_handler is None: LOG.debug("No audit, Don't Build compute data model") return if self._data_model_scope is None: LOG.debug("No audit scope, Don't Build compute data model") return builder = NovaModelBuilder(self.osc) return 
builder.execute(self._data_model_scope) class NovaModelBuilder(base.BaseModelBuilder): """Build the graph-based model This model builder adds the following data: - Compute-related knowledge (Nova) - TODO(v-francoise): Network-related knowledge (Neutron) NOTE(v-francoise): This model builder is meant to be extended in the future to also include both storage and network information respectively coming from Cinder and Neutron. Some preliminary work has been done in this direction in https://review.opendev.org/#/c/362730 but since we cannot guarantee a sufficient level of consistency for either the storage or the network part before the end of the Ocata cycle, this work has been re-scheduled for Pike. In the meantime, all the associated code has been commented out. """ def __init__(self, osc): self.osc = osc self.model = None self.model_scope = dict() self.no_model_scope_flag = False self.nova = osc.nova() self.nova_helper = nova_helper.NovaHelper(osc=self.osc) self.placement_helper = placement_helper.PlacementHelper(osc=self.osc) self.executor = threading.DecisionEngineThreadPool() def _collect_aggregates(self, host_aggregates, _nodes): if not host_aggregates: return aggregate_list = self.call_retry(f=self.nova_helper.get_aggregate_list) aggregate_ids = [aggregate['id'] for aggregate in host_aggregates if 'id' in aggregate] aggregate_names = [aggregate['name'] for aggregate in host_aggregates if 'name' in aggregate] include_all_nodes = any('*' in field for field in (aggregate_ids, aggregate_names)) for aggregate in aggregate_list: if (aggregate.id in aggregate_ids or aggregate.name in aggregate_names or include_all_nodes): _nodes.update(aggregate.hosts) def _collect_zones(self, availability_zones, _nodes): if not availability_zones: return service_list = self.call_retry(f=self.nova_helper.get_service_list) zone_names = [zone['name'] for zone in availability_zones] include_all_nodes = False if '*' in zone_names: include_all_nodes = True for service in service_list: if service.zone in zone_names or include_all_nodes: _nodes.add(service.host) def _compute_node_future(self, future, future_instances): """Add compute node information to model and schedule instance info job :param future: The future from the finished execution :type future: :py:class:`futurist.GreenFuture` :param future_instances: list of futures for instance jobs :type future_instances: list of :py:class:`futurist.GreenFuture` """ try: node_info = future.result()[0] # filter out baremetal node if node_info.hypervisor_type == 'ironic': LOG.debug("filtering out baremetal node: %s", node_info) return self.add_compute_node(node_info) # node.servers is a list of server objects # New in nova version 2.53 instances = getattr(node_info, "servers", None) # Do not submit job if there are no instances on compute node if instances is None: LOG.info("No instances on compute_node: %s", node_info) return future_instances.append( self.executor.submit( self.add_instance_node, node_info, instances) ) except Exception: LOG.error("compute node from aggregate / " "availability_zone could not be found") def _add_physical_layer(self): """Collects all information on compute nodes and instances Will collect all required compute node and instance information based on the host aggregates and availability zones. If aggregates and zones do not specify any compute nodes, all nodes are retrieved instead. The collection of information happens concurrently using the DecisionEngineThreadpool. 
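In miniature, the fan-out pattern used here is (illustrative sketch; ``collect`` stands in for the actual jobs)::

    futures = [self.executor.submit(collect, node) for node in nodes]
    waiters.wait_for_all(futures)
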
The collection is parallelized in three steps: first, information about aggregates and zones is gathered. Secondly, for each of the compute nodes a task is submitted to get detailed information about the compute node. Finally, each of these submitted tasks will submit an additional task if the compute node contains instances. Before returning from this function, all instance tasks are waited upon to complete. """ compute_nodes = set() host_aggregates = self.model_scope.get("host_aggregates") availability_zones = self.model_scope.get("availability_zones") """Submit tasks to gather compute nodes from availability zones and host aggregates. Each task adds compute nodes to the set; this set is threadsafe under the assumption that CPython is used with the GIL enabled.""" zone_aggregate_futures = { self.executor.submit( self._collect_aggregates, host_aggregates, compute_nodes), self.executor.submit( self._collect_zones, availability_zones, compute_nodes) } waiters.wait_for_all(zone_aggregate_futures) # if zones and aggregates did not contain any nodes, get every node. if not compute_nodes: self.no_model_scope_flag = True all_nodes = self.call_retry( f=self.nova_helper.get_compute_node_list) compute_nodes = set( [node.hypervisor_hostname for node in all_nodes]) LOG.debug("compute nodes: %s", compute_nodes) node_futures = [self.executor.submit( self.nova_helper.get_compute_node_by_name, node, servers=True, detailed=True) for node in compute_nodes] LOG.debug("submitted %d jobs", len(compute_nodes)) # Futures will concurrently be added, only safe with CPython GIL future_instances = [] self.executor.do_while_futures_modify( node_futures, self._compute_node_future, future_instances) # Wait for all instance jobs to finish waiters.wait_for_all(future_instances) def add_compute_node(self, node): # Build and add base node. LOG.debug("node info: %s", node) compute_node = self.build_compute_node(node) self.model.add_node(compute_node) # NOTE(v-francoise): we can encapsulate capabilities of the node # (special instruction sets of CPUs) in the attributes; sub-nodes can # also be added representing e.g. GPUs/accelerators etc. # # Build & add disk, memory, network and cpu nodes. # disk_id, disk_node = self.build_disk_compute_node(base_id, node) # self.add_node(disk_id, disk_node) # mem_id, mem_node = self.build_memory_compute_node(base_id, node) # self.add_node(mem_id, mem_node) # net_id, net_node = self._build_network_compute_node(base_id) # self.add_node(net_id, net_node) # cpu_id, cpu_node = self.build_cpu_compute_node(base_id, node) # self.add_node(cpu_id, cpu_node) # # Connect the base compute node to the dependent nodes. 
# self.add_edges_from([(base_id, disk_id), (base_id, mem_id), # (base_id, cpu_id), (base_id, net_id)], # label="contains") def build_compute_node(self, node): """Build a compute node from a Nova compute node :param node: A node hypervisor instance :type node: :py:class:`~novaclient.v2.hypervisors.Hypervisor` """ inventories = self.placement_helper.get_inventories(node.id) if inventories and orc.VCPU in inventories: vcpus = inventories[orc.VCPU]['total'] vcpu_reserved = inventories[orc.VCPU]['reserved'] vcpu_ratio = inventories[orc.VCPU]['allocation_ratio'] else: vcpus = node.vcpus vcpu_reserved = 0 vcpu_ratio = 1.0 if inventories and orc.MEMORY_MB in inventories: memory_mb = inventories[orc.MEMORY_MB]['total'] memory_mb_reserved = inventories[orc.MEMORY_MB]['reserved'] memory_ratio = inventories[orc.MEMORY_MB]['allocation_ratio'] else: memory_mb = node.memory_mb memory_mb_reserved = 0 memory_ratio = 1.0 # NOTE(licanwei): A nova BP support-shared-storage-resource-provider # will move DISK_GB from compute node to shared storage RP. # Here may need to be updated when the nova BP released. if inventories and orc.DISK_GB in inventories: disk_capacity = inventories[orc.DISK_GB]['total'] disk_gb_reserved = inventories[orc.DISK_GB]['reserved'] disk_ratio = inventories[orc.DISK_GB]['allocation_ratio'] else: disk_capacity = node.local_gb disk_gb_reserved = 0 disk_ratio = 1.0 # build up the compute node. node_attributes = { # The id of the hypervisor as a UUID from version 2.53. "uuid": node.id, "hostname": node.service["host"], "memory": memory_mb, "memory_ratio": memory_ratio, "memory_mb_reserved": memory_mb_reserved, "disk": disk_capacity, "disk_gb_reserved": disk_gb_reserved, "disk_ratio": disk_ratio, "vcpus": vcpus, "vcpu_reserved": vcpu_reserved, "vcpu_ratio": vcpu_ratio, "state": node.state, "status": node.status, "disabled_reason": node.service["disabled_reason"]} compute_node = element.ComputeNode(**node_attributes) # compute_node = self._build_node("physical", "compute", "hypervisor", # node_attributes) return compute_node def add_instance_node(self, node, instances): if instances is None: LOG.info("no instances on compute_node: %s", node) return host = node.service["host"] compute_node = self.model.get_node_by_uuid(node.id) filters = {'host': host} limit = len(instances) if len(instances) <= 1000 else -1 # Get all servers on this compute host. # Note that the advantage of passing the limit parameter is # that it can speed up the call time of novaclient. 1000 is # the default maximum number of return servers provided by # compute API. If we need to request more than 1000 servers, # we can set limit=-1. For details, please see: # https://bugs.launchpad.net/watcher/+bug/1834679 instances = self.call_retry(f=self.nova_helper.get_instance_list, filters=filters, limit=limit) for inst in instances: # skip deleted instance if getattr(inst, "OS-EXT-STS:vm_state") == ( element.InstanceState.DELETED.value): continue # Add Node instance = self._build_instance_node(inst) self.model.add_instance(instance) # Connect the instance to its compute node self.model.map_instance(instance, compute_node) def _build_instance_node(self, instance): """Build an instance node Create an instance node for the graph using nova and the `server` nova object. :param instance: Nova VM object. :return: An instance node for the graph. 
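For illustration, the mapping performed below is essentially (field names are the ones defined in ``element.Instance``; values indicative)::

    element.Instance(uuid=server.id, memory=flavor['ram'],
                     disk=flavor['disk'], vcpus=flavor['vcpus'], ...)
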
""" flavor = instance.flavor instance_attributes = { "uuid": instance.id, "name": instance.name, "memory": flavor["ram"], "disk": flavor["disk"], "vcpus": flavor["vcpus"], "state": getattr(instance, "OS-EXT-STS:vm_state"), "metadata": instance.metadata, "project_id": instance.tenant_id, "locked": instance.locked} # node_attributes = dict() # node_attributes["layer"] = "virtual" # node_attributes["category"] = "compute" # node_attributes["type"] = "compute" # node_attributes["attributes"] = instance_attributes return element.Instance(**instance_attributes) def _merge_compute_scope(self, compute_scope): model_keys = self.model_scope.keys() update_flag = False role_keys = ("host_aggregates", "availability_zones") for role in compute_scope: role_key = list(role.keys())[0] if role_key not in role_keys: continue role_values = list(role.values())[0] if role_key in model_keys: for value in role_values: if value not in self.model_scope[role_key]: self.model_scope[role_key].append(value) update_flag = True else: self.model_scope[role_key] = role_values update_flag = True return update_flag def _check_model_scope(self, model_scope): compute_scope = [] update_flag = False for _scope in model_scope: if 'compute' in _scope: compute_scope = _scope['compute'] break if self.no_model_scope_flag is False: if compute_scope: update_flag = self._merge_compute_scope(compute_scope) else: self.model_scope = dict() update_flag = True return update_flag def execute(self, model_scope): """Instantiates the graph with the openstack cluster data.""" updata_model_flag = self._check_model_scope(model_scope) if self.model is None or updata_model_flag: self.model = self.model or model_root.ModelRoot() self._add_physical_layer() return self.model ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/model/element/0000775000175000017500000000000000000000000024165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/__init__.py0000664000175000017500000000245000000000000026277 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.model.element import instance from watcher.decision_engine.model.element import node from watcher.decision_engine.model.element import volume ServiceState = node.ServiceState ComputeNode = node.ComputeNode StorageNode = node.StorageNode IronicNode = node.IronicNode Pool = node.Pool InstanceState = instance.InstanceState Instance = instance.Instance VolumeState = volume.VolumeState Volume = volume.Volume __all__ = ['ServiceState', 'ComputeNode', 'InstanceState', 'Instance', 'StorageNode', 'Pool', 'VolumeState', 'Volume', 'IronicNode'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/baremetal_resource.py0000664000175000017500000000163400000000000030406 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from watcher.decision_engine.model.element import base from watcher.objects import fields as wfields class BaremetalResource(base.Element, metaclass=abc.ABCMeta): VERSION = '1.0' fields = { "uuid": wfields.StringField(), "human_id": wfields.StringField(default=""), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/base.py0000664000175000017500000000403600000000000025454 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
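# Note: Element is the common versioned-object base class for everything
# stored in a cluster data model; as_xml_element() below serializes all of
# an element's fields into lxml attributes, e.g. (illustrative rendering)
# <Instance uuid="..." vcpus="2" .../>.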
import abc import collections from lxml import etree # nosec: B410 from oslo_log import log from watcher.objects import base from watcher.objects import fields as wfields LOG = log.getLogger(__name__) class Element(base.WatcherObject, base.WatcherObjectDictCompat, base.WatcherComparableObject, metaclass=abc.ABCMeta): # Initial version VERSION = '1.0' fields = {} def __init__(self, context=None, **kwargs): for name, field in self.fields.items(): # The idea here is to force the initialization of unspecified # fields that have a default value if (name not in kwargs and not field.nullable and field.default != wfields.UnspecifiedDefault): kwargs[name] = field.default super(Element, self).__init__(context, **kwargs) @abc.abstractmethod def accept(self, visitor): raise NotImplementedError() def as_xml_element(self): sorted_fieldmap = [] for field in self.fields: try: value = str(self[field]) sorted_fieldmap.append((field, value)) except Exception as exc: LOG.exception(exc) attrib = collections.OrderedDict(sorted_fieldmap) element_name = self.__class__.__name__ instance_el = etree.Element(element_name, attrib=attrib) return instance_el ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/compute_resource.py0000664000175000017500000000153400000000000030125 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from watcher.decision_engine.model.element import base from watcher.objects import fields as wfields class ComputeResource(base.Element, metaclass=abc.ABCMeta): VERSION = '1.0' fields = { "uuid": wfields.StringField(), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/instance.py0000664000175000017500000000416200000000000026346 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from watcher.decision_engine.model.element import compute_resource from watcher.objects import base from watcher.objects import fields as wfields class InstanceState(enum.Enum): ACTIVE = 'active' # Instance is running BUILDING = 'building' # Instance only exists in DB PAUSED = 'paused' SUSPENDED = 'suspended' # Instance is suspended to disk. STOPPED = 'stopped' # Instance is shut off, the disk image is still there. 
RESCUED = 'rescued' # A rescue image is running with the original image # attached. RESIZED = 'resized' # an Instance with the new size is active. SHELVED = 'shelved' SOFT_DELETED = 'soft-delete' # still available to restore. DELETED = 'deleted' # Instance is permanently deleted. ERROR = 'error' @base.WatcherObjectRegistry.register_if(False) class Instance(compute_resource.ComputeResource): fields = { # If the resource is excluded by the scope, # 'watcher_exclude' property will be set True. "watcher_exclude": wfields.BooleanField(default=False), "name": wfields.StringField(), "state": wfields.StringField(default=InstanceState.ACTIVE.value), "memory": wfields.NonNegativeIntegerField(), "disk": wfields.NonNegativeIntegerField(), "vcpus": wfields.NonNegativeIntegerField(), "metadata": wfields.JsonField(), "project_id": wfields.UUIDField(), "locked": wfields.BooleanField(default=False), } def accept(self, visitor): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/node.py0000664000175000017500000000730100000000000025465 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
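# Note: the ComputeNode capacity properties defined below all have the form
# (total - reserved) * allocation_ratio. Worked example with illustrative
# numbers: memory=1024 MB, memory_mb_reserved=256 and memory_ratio=1.5 give
# memory_mb_capacity = (1024 - 256) * 1.5 = 1152 MB.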
import enum from watcher.decision_engine.model.element import baremetal_resource from watcher.decision_engine.model.element import compute_resource from watcher.decision_engine.model.element import storage_resource from watcher.objects import base from watcher.objects import fields as wfields class ServiceState(enum.Enum): ONLINE = 'up' OFFLINE = 'down' ENABLED = 'enabled' DISABLED = 'disabled' @base.WatcherObjectRegistry.register_if(False) class ComputeNode(compute_resource.ComputeResource): fields = { "hostname": wfields.StringField(), "status": wfields.StringField(default=ServiceState.ENABLED.value), "disabled_reason": wfields.StringField(nullable=True), "state": wfields.StringField(default=ServiceState.ONLINE.value), "memory": wfields.NonNegativeIntegerField(), "memory_mb_reserved": wfields.NonNegativeIntegerField(), "disk": wfields.NonNegativeIntegerField(), "disk_gb_reserved": wfields.NonNegativeIntegerField(), "vcpus": wfields.NonNegativeIntegerField(), "vcpu_reserved": wfields.NonNegativeIntegerField(), "memory_ratio": wfields.NonNegativeFloatField(), "vcpu_ratio": wfields.NonNegativeFloatField(), "disk_ratio": wfields.NonNegativeFloatField(), } def accept(self, visitor): raise NotImplementedError() @property def memory_mb_capacity(self): return (self.memory-self.memory_mb_reserved)*self.memory_ratio @property def disk_gb_capacity(self): return (self.disk-self.disk_gb_reserved)*self.disk_ratio @property def vcpu_capacity(self): return (self.vcpus-self.vcpu_reserved)*self.vcpu_ratio @base.WatcherObjectRegistry.register_if(False) class StorageNode(storage_resource.StorageResource): fields = { "host": wfields.StringField(), "zone": wfields.StringField(), "status": wfields.StringField(default=ServiceState.ENABLED.value), "state": wfields.StringField(default=ServiceState.ONLINE.value), "volume_type": wfields.ListOfStringsField() } def accept(self, visitor): raise NotImplementedError() @base.WatcherObjectRegistry.register_if(False) class Pool(storage_resource.StorageResource): fields = { "name": wfields.StringField(), "total_volumes": wfields.NonNegativeIntegerField(), "total_capacity_gb": wfields.NonNegativeIntegerField(), "free_capacity_gb": wfields.NonNegativeIntegerField(), "provisioned_capacity_gb": wfields.NonNegativeIntegerField(), "allocated_capacity_gb": wfields.NonNegativeIntegerField(), "virtual_free": wfields.NonNegativeIntegerField(default=0), } def accept(self, visitor): raise NotImplementedError() @base.WatcherObjectRegistry.register_if(False) class IronicNode(baremetal_resource.BaremetalResource): fields = { "power_state": wfields.StringField(), "maintenance": wfields.BooleanField(), "maintenance_reason": wfields.StringField(), "extra": wfields.DictField() } def accept(self, visitor): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/storage_resource.py0000664000175000017500000000164000000000000030113 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import abc from watcher.decision_engine.model.element import base from watcher.objects import fields as wfields class StorageResource(base.Element, metaclass=abc.ABCMeta): VERSION = '1.0' fields = { "uuid": wfields.StringField(default=""), "human_id": wfields.StringField(default=""), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/element/volume.py0000664000175000017500000000350000000000000026044 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from watcher.decision_engine.model.element import storage_resource from watcher.objects import base from watcher.objects import fields as wfields class VolumeState(enum.Enum): # https://docs.openstack.org/api-ref/block-storage/v3/#volumes-volumes CREATING = 'creating' AVAILABLE = 'available' ATTACHING = 'attaching' IN_USE = 'in-use' DELETING = 'deleting' ERROR = 'error' ERROR_DELETING = 'error_deleting' BACKING_UP = 'backing-up' RESTORING_BACKUP = 'restoring-backup' ERROR_RESTORING = 'error_restoring' ERROR_EXTENDING = 'error_extending' @base.WatcherObjectRegistry.register_if(False) class Volume(storage_resource.StorageResource): fields = { "size": wfields.NonNegativeIntegerField(), "status": wfields.StringField(default=VolumeState.AVAILABLE.value), "attachments": wfields.FlexibleListOfDictField(), "name": wfields.StringField(), "multiattach": wfields.BooleanField(), "snapshot_id": wfields.UUIDField(nullable=True), "project_id": wfields.UUIDField(), "metadata": wfields.JsonField(), "bootable": wfields.BooleanField() } def accept(self, visitor): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/model_root.py0000664000175000017500000005667300000000000025272 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Innovation and Research Ireland Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Openstack implementation of the cluster graph. 
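A hedged usage sketch (the element objects are built by the collectors above; names indicative)::

    model = ModelRoot()
    model.add_node(compute_node)                # element.ComputeNode
    model.add_instance(instance)                # element.Instance
    model.map_instance(instance, compute_node)  # edge instance -> node
    assert model.get_node_by_instance_uuid(
        instance.uuid) is compute_node
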
""" import ast from lxml import etree # nosec: B410 import networkx as nx from oslo_concurrency import lockutils from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import base from watcher.decision_engine.model import element LOG = log.getLogger(__name__) class ModelRoot(nx.DiGraph, base.Model): """Cluster graph for an Openstack cluster.""" def __init__(self, stale=False): super(ModelRoot, self).__init__() self.stale = stale def __nonzero__(self): return not self.stale __bool__ = __nonzero__ @staticmethod def assert_node(obj): if not isinstance(obj, element.ComputeNode): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @staticmethod def assert_instance(obj): if not isinstance(obj, element.Instance): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid")) @lockutils.synchronized("model_root") def add_node(self, node): self.assert_node(node) super(ModelRoot, self).add_node(node.uuid, attr=node) @lockutils.synchronized("model_root") def remove_node(self, node): self.assert_node(node) try: super(ModelRoot, self).remove_node(node.uuid) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.ComputeNodeNotFound(name=node.uuid) @lockutils.synchronized("model_root") def add_instance(self, instance): self.assert_instance(instance) try: super(ModelRoot, self).add_node(instance.uuid, attr=instance) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.InstanceNotFound(name=instance.uuid) @lockutils.synchronized("model_root") def remove_instance(self, instance): self.assert_instance(instance) super(ModelRoot, self).remove_node(instance.uuid) @lockutils.synchronized("model_root") def map_instance(self, instance, node): """Map a newly created instance to a node :param instance: :py:class:`~.instance.Instance` object or instance UUID :type instance: str or :py:class:`~.instance.Instance` :param node: :py:class:`~.node.ComputeNode` object or node UUID :type node: str or :py:class:`~.instance.Instance` """ if isinstance(instance, str): instance = self.get_instance_by_uuid(instance) if isinstance(node, str): node = self.get_node_by_uuid(node) self.assert_node(node) self.assert_instance(instance) self.add_edge(instance.uuid, node.uuid) @lockutils.synchronized("model_root") def unmap_instance(self, instance, node): if isinstance(instance, str): instance = self.get_instance_by_uuid(instance) if isinstance(node, str): node = self.get_node_by_uuid(node) self.remove_edge(instance.uuid, node.uuid) def delete_instance(self, instance, node=None): self.assert_instance(instance) self.remove_instance(instance) @lockutils.synchronized("model_root") def migrate_instance(self, instance, source_node, destination_node): """Migrate single instance from source_node to destination_node :param instance: :param source_node: :param destination_node: :return: """ self.assert_instance(instance) self.assert_node(source_node) self.assert_node(destination_node) if source_node == destination_node: return False # unmap self.remove_edge(instance.uuid, source_node.uuid) # map self.add_edge(instance.uuid, destination_node.uuid) return True @lockutils.synchronized("model_root") def get_all_compute_nodes(self): return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True) if isinstance(cn['attr'], element.ComputeNode)} @lockutils.synchronized("model_root") def get_node_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except 
exception.ComputeResourceNotFound: raise exception.ComputeNodeNotFound(name=uuid) @lockutils.synchronized("model_root") def get_node_by_name(self, name): try: node_list = [cn['attr'] for uuid, cn in self.nodes(data=True) if (isinstance(cn['attr'], element.ComputeNode) and cn['attr']['hostname'] == name)] if node_list: return node_list[0] else: raise exception.ComputeNodeNotFound(name=name) except exception.ComputeResourceNotFound: raise exception.ComputeNodeNotFound(name=name) @lockutils.synchronized("model_root") def get_instance_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.ComputeResourceNotFound: raise exception.InstanceNotFound(name=uuid) def _get_by_uuid(self, uuid): try: return self.nodes[uuid]['attr'] except Exception as exc: LOG.exception(exc) raise exception.ComputeResourceNotFound(name=uuid) @lockutils.synchronized("model_root") def get_node_by_instance_uuid(self, instance_uuid): instance = self._get_by_uuid(instance_uuid) for node_uuid in self.neighbors(instance.uuid): node = self._get_by_uuid(node_uuid) if isinstance(node, element.ComputeNode): return node raise exception.InstanceNotMapped(uuid=instance_uuid) @lockutils.synchronized("model_root") def get_all_instances(self): return {uuid: inst['attr'] for uuid, inst in self.nodes(data=True) if isinstance(inst['attr'], element.Instance)} @lockutils.synchronized("model_root") def get_node_instances(self, node): self.assert_node(node) node_instances = [] for instance_uuid in self.predecessors(node.uuid): instance = self._get_by_uuid(instance_uuid) if isinstance(instance, element.Instance): node_instances.append(instance) return node_instances def get_node_used_resources(self, node): vcpu_used = 0 memory_used = 0 disk_used = 0 for instance in self.get_node_instances(node): vcpu_used += instance.vcpus memory_used += instance.memory disk_used += instance.disk return dict(vcpu=vcpu_used, memory=memory_used, disk=disk_used) def get_node_free_resources(self, node): resources_used = self.get_node_used_resources(node) vcpu_free = node.vcpu_capacity-resources_used.get('vcpu') memory_free = node.memory_mb_capacity-resources_used.get('memory') disk_free = node.disk_gb_capacity-resources_used.get('disk') return dict(vcpu=vcpu_free, memory=memory_free, disk=disk_free) def to_string(self): return self.to_xml() def to_xml(self): root = etree.Element("ModelRoot") # Build compute node tree for cn in sorted(self.get_all_compute_nodes().values(), key=lambda cn: cn.uuid): compute_node_el = cn.as_xml_element() # Build mapped instance tree node_instances = self.get_node_instances(cn) for instance in sorted(node_instances, key=lambda x: x.uuid): instance_el = instance.as_xml_element() compute_node_el.append(instance_el) root.append(compute_node_el) # Build unmapped instance tree (i.e. 
not assigned to any compute node) for instance in sorted(self.get_all_instances().values(), key=lambda inst: inst.uuid): try: self.get_node_by_instance_uuid(instance.uuid) except exception.ComputeResourceNotFound: root.append(instance.as_xml_element()) return etree.tostring(root, pretty_print=True).decode('utf-8') def to_list(self): ret_list = [] for cn in sorted(self.get_all_compute_nodes().values(), key=lambda cn: cn.uuid): in_dict = {} for field in cn.fields: new_name = "node_"+str(field) in_dict[new_name] = cn[field] node_instances = self.get_node_instances(cn) if not node_instances: deep_in_dict = in_dict.copy() ret_list.append(deep_in_dict) continue for instance in sorted(node_instances, key=lambda x: x.uuid): for field in instance.fields: new_name = "server_"+str(field) in_dict[new_name] = instance[field] if in_dict != {}: deep_in_dict = in_dict.copy() ret_list.append(deep_in_dict) return ret_list @classmethod def from_xml(cls, data): model = cls() root = etree.fromstring(data) for cn in root.findall('.//ComputeNode'): node = element.ComputeNode(**cn.attrib) model.add_node(node) for inst in root.findall('.//Instance'): instance = element.Instance(**inst.attrib) instance.watcher_exclude = ast.literal_eval( inst.attrib["watcher_exclude"]) model.add_instance(instance) parent = inst.getparent() if parent.tag == 'ComputeNode': node = model.get_node_by_uuid(parent.get('uuid')) model.map_instance(instance, node) else: model.add_instance(instance) return model @classmethod def is_isomorphic(cls, G1, G2): def node_match(node1, node2): return node1['attr'].as_dict() == node2['attr'].as_dict() return nx.algorithms.isomorphism.isomorph.is_isomorphic( G1, G2, node_match=node_match) class StorageModelRoot(nx.DiGraph, base.Model): """Cluster graph for an Openstack cluster.""" def __init__(self, stale=False): super(StorageModelRoot, self).__init__() self.stale = stale def __nonzero__(self): return not self.stale __bool__ = __nonzero__ @staticmethod def assert_node(obj): if not isinstance(obj, element.StorageNode): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @staticmethod def assert_pool(obj): if not isinstance(obj, element.Pool): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @staticmethod def assert_volume(obj): if not isinstance(obj, element.Volume): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @lockutils.synchronized("storage_model") def add_node(self, node): self.assert_node(node) super(StorageModelRoot, self).add_node(node.host, attr=node) @lockutils.synchronized("storage_model") def add_pool(self, pool): self.assert_pool(pool) super(StorageModelRoot, self).add_node(pool.name, attr=pool) @lockutils.synchronized("storage_model") def remove_node(self, node): self.assert_node(node) try: super(StorageModelRoot, self).remove_node(node.host) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.StorageNodeNotFound(name=node.host) @lockutils.synchronized("storage_model") def remove_pool(self, pool): self.assert_pool(pool) try: super(StorageModelRoot, self).remove_node(pool.name) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.PoolNotFound(name=pool.name) @lockutils.synchronized("storage_model") def map_pool(self, pool, node): """Map a newly created pool to a node :param pool: :py:class:`~.node.Pool` object or pool name :param node: :py:class:`~.node.StorageNode` object or node host """ if 
isinstance(pool, str): pool = self.get_pool_by_pool_name(pool) if isinstance(node, str): node = self.get_node_by_name(node) self.assert_node(node) self.assert_pool(pool) self.add_edge(pool.name, node.host) @lockutils.synchronized("storage_model") def unmap_pool(self, pool, node): """Unmap a pool from a node :param pool: :py:class:`~.node.Pool` object or pool name :param node: :py:class:`~.node.StorageNode` object or node name """ if isinstance(pool, str): pool = self.get_pool_by_pool_name(pool) if isinstance(node, str): node = self.get_node_by_name(node) self.remove_edge(pool.name, node.host) @lockutils.synchronized("storage_model") def add_volume(self, volume): self.assert_volume(volume) super(StorageModelRoot, self).add_node(volume.uuid, attr=volume) @lockutils.synchronized("storage_model") def remove_volume(self, volume): self.assert_volume(volume) try: super(StorageModelRoot, self).remove_node(volume.uuid) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.VolumeNotFound(name=volume.uuid) @lockutils.synchronized("storage_model") def map_volume(self, volume, pool): """Map a newly created volume to a pool :param volume: :py:class:`~.volume.Volume` object or volume UUID :param pool: :py:class:`~.node.Pool` object or pool name """ if isinstance(volume, str): volume = self.get_volume_by_uuid(volume) if isinstance(pool, str): pool = self.get_pool_by_pool_name(pool) self.assert_pool(pool) self.assert_volume(volume) self.add_edge(volume.uuid, pool.name) @lockutils.synchronized("storage_model") def unmap_volume(self, volume, pool): """Unmap a volume from a pool :param volume: :py:class:`~.volume.Volume` object or volume UUID :param pool: :py:class:`~.node.Pool` object or pool name """ if isinstance(volume, str): volume = self.get_volume_by_uuid(volume) if isinstance(pool, str): pool = self.get_pool_by_pool_name(pool) self.remove_edge(volume.uuid, pool.name) def delete_volume(self, volume): self.assert_volume(volume) self.remove_volume(volume) @lockutils.synchronized("storage_model") def get_all_storage_nodes(self): return {host: cn['attr'] for host, cn in self.nodes(data=True) if isinstance(cn['attr'], element.StorageNode)} @lockutils.synchronized("storage_model") def get_node_by_name(self, name): try: return self._get_by_name(name.split("#")[0]) except exception.StorageResourceNotFound: raise exception.StorageNodeNotFound(name=name) @lockutils.synchronized("storage_model") def get_pool_by_pool_name(self, name): try: return self._get_by_name(name) except exception.StorageResourceNotFound: raise exception.PoolNotFound(name=name) @lockutils.synchronized("storage_model") def get_volume_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.StorageResourceNotFound: raise exception.VolumeNotFound(name=uuid) def _get_by_uuid(self, uuid): try: return self.nodes[uuid]['attr'] except Exception as exc: LOG.exception(exc) raise exception.StorageResourceNotFound(name=uuid) def _get_by_name(self, name): try: return self.nodes[name]['attr'] except Exception as exc: LOG.exception(exc) raise exception.StorageResourceNotFound(name=name) @lockutils.synchronized("storage_model") def get_node_by_pool_name(self, pool_name): pool = self._get_by_name(pool_name) for node_name in self.neighbors(pool.name): node = self._get_by_name(node_name) if isinstance(node, element.StorageNode): return node raise exception.StorageNodeNotFound(name=pool_name) @lockutils.synchronized("storage_model") def get_node_pools(self, node): self.assert_node(node) node_pools = [] for pool_name in 
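# --- Illustrative sketch (not part of the original module): Cinder reports
# backends as "host@backend" and pools as "host@backend#pool". The lookups
# above rely on that convention: get_node_by_name() strips the "#pool" suffix
# before resolving the storage node, and the notification code further down
# takes the backend name from the part after "@". With a made-up pool name:
#
pool_name = "server1@lvmdriver-1#lvmdriver-1"
node_name = pool_name.split("#")[0]
backend = node_name.split("@")[1]
assert node_name == "server1@lvmdriver-1"
assert backend == "lvmdriver-1"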
self.predecessors(node.host): pool = self._get_by_name(pool_name) if isinstance(pool, element.Pool): node_pools.append(pool) return node_pools @lockutils.synchronized("storage_model") def get_pool_by_volume(self, volume): self.assert_volume(volume) volume = self._get_by_uuid(volume.uuid) for p in self.neighbors(volume.uuid): pool = self._get_by_name(p) if isinstance(pool, element.Pool): return pool raise exception.PoolNotFound(name=volume.uuid) @lockutils.synchronized("storage_model") def get_all_volumes(self): return {name: vol['attr'] for name, vol in self.nodes(data=True) if isinstance(vol['attr'], element.Volume)} @lockutils.synchronized("storage_model") def get_pool_volumes(self, pool): self.assert_pool(pool) volumes = [] for vol in self.predecessors(pool.name): volume = self._get_by_uuid(vol) if isinstance(volume, element.Volume): volumes.append(volume) return volumes def to_string(self): return self.to_xml() def to_xml(self): root = etree.Element("ModelRoot") # Build storage node tree for cn in sorted(self.get_all_storage_nodes().values(), key=lambda cn: cn.host): storage_node_el = cn.as_xml_element() # Build mapped pool tree node_pools = self.get_node_pools(cn) for pool in sorted(node_pools, key=lambda x: x.name): pool_el = pool.as_xml_element() storage_node_el.append(pool_el) # Build mapped volume tree pool_volumes = self.get_pool_volumes(pool) for volume in sorted(pool_volumes, key=lambda x: x.uuid): volume_el = volume.as_xml_element() pool_el.append(volume_el) root.append(storage_node_el) # Build unmapped volume tree (i.e. not assigned to any pool) for volume in sorted(self.get_all_volumes().values(), key=lambda vol: vol.uuid): try: self.get_pool_by_volume(volume) except (exception.VolumeNotFound, exception.PoolNotFound): root.append(volume.as_xml_element()) return etree.tostring(root, pretty_print=True).decode('utf-8') @classmethod def from_xml(cls, data): model = cls() root = etree.fromstring(data) for cn in root.findall('.//StorageNode'): ndata = {} for attr, val in cn.items(): ndata[attr] = val volume_type = ndata.get('volume_type') if volume_type: ndata['volume_type'] = [volume_type] node = element.StorageNode(**ndata) model.add_node(node) for p in root.findall('.//Pool'): pool = element.Pool(**p.attrib) model.add_pool(pool) parent = p.getparent() if parent.tag == 'StorageNode': node = model.get_node_by_name(parent.get('host')) model.map_pool(pool, node) else: model.add_pool(pool) for vol in root.findall('.//Volume'): volume = element.Volume(**vol.attrib) model.add_volume(volume) parent = vol.getparent() if parent.tag == 'Pool': pool = model.get_pool_by_pool_name(parent.get('name')) model.map_volume(volume, pool) else: model.add_volume(volume) return model @classmethod def is_isomorphic(cls, G1, G2): return nx.algorithms.isomorphism.isomorph.is_isomorphic( G1, G2) class BaremetalModelRoot(nx.DiGraph, base.Model): """Cluster graph for an Openstack cluster: Baremetal Cluster.""" def __init__(self, stale=False): super(BaremetalModelRoot, self).__init__() self.stale = stale def __nonzero__(self): return not self.stale __bool__ = __nonzero__ @staticmethod def assert_node(obj): if not isinstance(obj, element.IronicNode): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @lockutils.synchronized("baremetal_model") def add_node(self, node): self.assert_node(node) super(BaremetalModelRoot, self).add_node(node.uuid, attr=node) @lockutils.synchronized("baremetal_model") def remove_node(self, node): self.assert_node(node) try: 
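# --- Illustrative sketch (not part of the original module): the storage model
# mirrors the compute one as a two-level hierarchy of directed edges,
# volume -> pool -> node, so get_pool_volumes() reads predecessors(pool.name)
# and get_node_by_pool_name() walks neighbors(pool.name). Standalone
# demonstration with made-up identifiers:
#
import networkx as nx
g = nx.DiGraph()
g.add_node("server1@lvm")                 # storage node, keyed by host
g.add_node("server1@lvm#pool1")           # pool, keyed by name
g.add_node("vol-uuid-1")                  # volume, keyed by UUID
g.add_edge("server1@lvm#pool1", "server1@lvm")   # map_pool
g.add_edge("vol-uuid-1", "server1@lvm#pool1")    # map_volume
assert list(g.predecessors("server1@lvm#pool1")) == ["vol-uuid-1"]
assert list(g.neighbors("server1@lvm#pool1")) == ["server1@lvm"]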
super(BaremetalModelRoot, self).remove_node(node.uuid) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.IronicNodeNotFound(uuid=node.uuid) @lockutils.synchronized("baremetal_model") def get_all_ironic_nodes(self): return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True) if isinstance(cn['attr'], element.IronicNode)} @lockutils.synchronized("baremetal_model") def get_node_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.BaremetalResourceNotFound: raise exception.IronicNodeNotFound(uuid=uuid) def _get_by_uuid(self, uuid): try: return self.nodes[uuid]['attr'] except Exception as exc: LOG.exception(exc) raise exception.BaremetalResourceNotFound(name=uuid) def to_string(self): return self.to_xml() def to_xml(self): root = etree.Element("ModelRoot") # Build Ironic node tree for cn in sorted(self.get_all_ironic_nodes().values(), key=lambda cn: cn.uuid): ironic_node_el = cn.as_xml_element() root.append(ironic_node_el) return etree.tostring(root, pretty_print=True).decode('utf-8') @classmethod def from_xml(cls, data): model = cls() root = etree.fromstring(data) for cn in root.findall('.//IronicNode'): node = element.IronicNode(**cn.attrib) model.add_node(node) return model @classmethod def is_isomorphic(cls, G1, G2): return nx.algorithms.isomorphism.isomorph.is_isomorphic( G1, G2) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/model/notification/0000775000175000017500000000000000000000000025222 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/notification/__init__.py0000664000175000017500000000000000000000000027321 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/notification/base.py0000664000175000017500000000214000000000000026503 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
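# --- Illustrative sketch (not part of the original module): a concrete
# endpoint built on the NotificationEndpoint base class defined just below
# only needs to provide filter_rule plus one or more priority handlers such
# as info(); oslo.messaging invokes the handler for each notification that
# matches the filter. The class name and event type here are made up.
#
class DummyEndpoint(NotificationEndpoint):

    @property
    def filter_rule(self):
        # filtering.NotificationFilter is the regex-based subclass defined
        # later in this package
        return filtering.NotificationFilter(event_type=r'^dummy\..*')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        # a real endpoint would update self.cluster_data_model from payload
        print("got %s from %s" % (event_type, publisher_id))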
import abc class NotificationEndpoint(object, metaclass=abc.ABCMeta): def __init__(self, collector): super(NotificationEndpoint, self).__init__() self.collector = collector self._notifier = None @property @abc.abstractmethod def filter_rule(self): """Notification Filter""" raise NotImplementedError() @property def cluster_data_model(self): return self.collector.cluster_data_model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/notification/cinder.py0000664000175000017500000003350600000000000027047 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.model.notification import base from watcher.decision_engine.model.notification import filtering LOG = log.getLogger(__name__) class CinderNotification(base.NotificationEndpoint): def __init__(self, collector): super(CinderNotification, self).__init__(collector) self._cinder = None @property def cinder(self): if self._cinder is None: self._cinder = cinder_helper.CinderHelper() return self._cinder def update_pool(self, pool, data): """Update the storage pool using the notification data.""" pool.update({ "total_capacity_gb": data['total'], "free_capacity_gb": data['free'], "provisioned_capacity_gb": data['provisioned'], "allocated_capacity_gb": data['allocated'], "virtual_free": data['virtual_free'] }) node_name = pool.name.split("#")[0] node = self.get_or_create_node(node_name) self.cluster_data_model.map_pool(pool, node) LOG.debug("Mapped pool %s to %s", pool.name, node.host) def update_pool_by_api(self, pool): """Update the storage pool using the API data.""" if not pool: return _pool = self.cinder.get_storage_pool_by_name(pool.name) pool.update({ "total_volumes": _pool.total_volumes, "total_capacity_gb": _pool.total_capacity_gb, "free_capacity_gb": _pool.free_capacity_gb, "provisioned_capacity_gb": _pool.provisioned_capacity_gb, "allocated_capacity_gb": _pool.allocated_capacity_gb }) node_name = pool.name.split("#")[0] node = self.get_or_create_node(node_name) self.cluster_data_model.map_pool(pool, node) LOG.debug("Mapped pool %s to %s", pool.name, node.host) def create_storage_node(self, name): """Create the storage node by querying the Cinder API.""" try: _node = self.cinder.get_storage_node_by_name(name) _volume_type = self.cinder.get_volume_type_by_backendname( # name is formatted as host@backendname name.split('@')[1]) storage_node = element.StorageNode( host=_node.host, zone=_node.zone, state=_node.state, status=_node.status, volume_type=_volume_type) return storage_node except Exception as exc: LOG.exception(exc) LOG.debug("Could not create storage node %s.", name) raise exception.StorageNodeNotFound(name=name) def get_or_create_node(self, name): """Get storage node by name, 
otherwise create storage node""" if name is None: LOG.debug("Storage node name not provided: skipping") return try: return self.cluster_data_model.get_node_by_name(name) except exception.StorageNodeNotFound: # The node didn't exist yet so we create a new node object node = self.create_storage_node(name) LOG.debug("New storage node created: %s", name) self.cluster_data_model.add_node(node) LOG.debug("New storage node added: %s", name) return node def create_pool(self, pool_name): """Create the storage pool by querying the Cinder API.""" try: _pool = self.cinder.get_storage_pool_by_name(pool_name) pool = element.Pool( name=_pool.name, total_volumes=_pool.total_volumes, total_capacity_gb=_pool.total_capacity_gb, free_capacity_gb=_pool.free_capacity_gb, provisioned_capacity_gb=_pool.provisioned_capacity_gb, allocated_capacity_gb=_pool.allocated_capacity_gb) return pool except Exception as exc: LOG.exception(exc) LOG.debug("Could not refresh the pool %s.", pool_name) raise exception.PoolNotFound(name=pool_name) def get_or_create_pool(self, name): if not name: LOG.debug("Pool name not provided: skipping") return try: return self.cluster_data_model.get_pool_by_pool_name(name) except exception.PoolNotFound: # The pool didn't exist yet so we create a new pool object pool = self.create_pool(name) LOG.debug("New storage pool created: %s", name) self.cluster_data_model.add_pool(pool) LOG.debug("New storage pool added: %s", name) return pool def get_or_create_volume(self, volume_id, pool_name=None): try: if pool_name: self.get_or_create_pool(pool_name) except exception.PoolNotFound: LOG.warning("Could not find storage pool %(pool)s for " "volume %(volume)s", dict(pool=pool_name, volume=volume_id)) try: return self.cluster_data_model.get_volume_by_uuid(volume_id) except exception.VolumeNotFound: # The volume didn't exist yet so we create a new volume object volume = element.Volume(uuid=volume_id) self.cluster_data_model.add_volume(volume) return volume def update_volume(self, volume, data): """Update the volume using the notification data.""" def _keyReplace(key): if key == 'instance_uuid': return 'server_id' if key == 'id': return 'attachment_id' attachments = [ {_keyReplace(k): v for k, v in iter(d.items()) if k in ('instance_uuid', 'id')} for d in data['volume_attachment'] ] # glance_metadata is provided if volume is bootable bootable = False if 'glance_metadata' in data: bootable = True volume.update({ "name": data['display_name'] or "", "size": data['size'], "status": data['status'], "attachments": attachments, "snapshot_id": data['snapshot_id'] or "", "project_id": data['tenant_id'], "metadata": data['metadata'], "bootable": bootable }) try: # if volume is under pool, let's update pool element. 
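# --- Illustrative sketch (not part of the original module): update_volume()
# above rewrites Cinder's volume_attachment entries into Watcher's attachment
# format, renaming 'instance_uuid' -> 'server_id' and 'id' -> 'attachment_id'
# while dropping every other key. Equivalent standalone transformation with
# made-up data:
#
data = {"volume_attachment": [
    {"instance_uuid": "server-1", "id": "attach-1",
     "attach_status": "attached"},
]}
rename = {"instance_uuid": "server_id", "id": "attachment_id"}
attachments = [
    {rename[k]: v for k, v in d.items() if k in rename}
    for d in data["volume_attachment"]
]
assert attachments == [{"server_id": "server-1", "attachment_id": "attach-1"}]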
# get existing pool or create pool by cinder api pool = self.get_or_create_pool(data['host']) self.update_pool_by_api(pool) except exception.PoolNotFound as exc: LOG.exception(exc) pool = None self.update_volume_mapping(volume, pool) def update_volume_mapping(self, volume, pool): if pool is None: self.cluster_data_model.add_volume(volume) LOG.debug("Volume %s not yet attached to any pool: skipping", volume.uuid) return try: try: current_pool = ( self.cluster_data_model.get_pool_by_volume( volume) or self.get_or_create_pool(pool.name)) except exception.PoolNotFound as exc: LOG.exception(exc) # If we can't create the pool, # we consider the volume as unmapped current_pool = None LOG.debug("Mapped pool %s found", pool.name) if current_pool and pool != current_pool: LOG.debug("Unmapping volume %s from %s", volume.uuid, pool.name) self.cluster_data_model.unmap_volume(volume, current_pool) except exception.VolumeNotFound: # The instance didn't exist yet so we map it for the first time LOG.debug("New volume: mapping it to %s", pool.name) finally: if pool: self.cluster_data_model.map_volume(volume, pool) LOG.debug("Mapped volume %s to %s", volume.uuid, pool.name) def delete_volume(self, volume, pool): try: self.cluster_data_model.delete_volume(volume) except Exception: LOG.info("Volume %s already deleted", volume.uuid) try: if pool: # if volume is under pool, let's update pool element. # get existing pool or create pool by cinder api pool = self.get_or_create_pool(pool.name) self.update_pool_by_api(pool) except exception.PoolNotFound as exc: LOG.exception(exc) pool = None class CapacityNotificationEndpoint(CinderNotification): @property def filter_rule(self): """Cinder capacity notification filter""" return filtering.NotificationFilter( publisher_id=r'capacity.*', event_type='capacity.pool', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) name = payload['name_to_id'] try: pool = self.get_or_create_pool(name) self.update_pool(pool, payload) except exception.PoolNotFound as exc: LOG.exception(exc) class VolumeNotificationEndpoint(CinderNotification): publisher_id_regex = r'^volume.*' class VolumeCreateEnd(VolumeNotificationEndpoint): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.create.end', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) volume_id = payload['volume_id'] poolname = payload['host'] volume = self.get_or_create_volume(volume_id, poolname) self.update_volume(volume, payload) class VolumeUpdateEnd(VolumeNotificationEndpoint): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.update.end', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", 
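# --- Illustrative sketch (not part of the original module): the remapping in
# update_volume_mapping() above boils down to "if the volume is currently
# mapped to a different pool, unmap it first, then (re)map it to the pool
# named in the notification". A condensed version of that decision, where
# 'model' stands for the cluster data model and current_pool is None when the
# volume was unmapped or the pool lookup failed:
#
def remap(model, volume, new_pool, current_pool):
    if current_pool and current_pool != new_pool:
        model.unmap_volume(volume, current_pool)
    if new_pool:
        model.map_volume(volume, new_pool)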
dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) volume_id = payload['volume_id'] poolname = payload['host'] volume = self.get_or_create_volume(volume_id, poolname) self.update_volume(volume, payload) class VolumeAttachEnd(VolumeUpdateEnd): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.attach.end', ) class VolumeDetachEnd(VolumeUpdateEnd): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.detach.end', ) class VolumeResizeEnd(VolumeUpdateEnd): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.resize.end', ) class VolumeDeleteEnd(VolumeNotificationEndpoint): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.delete.end', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) volume_id = payload['volume_id'] poolname = payload['host'] volume = self.get_or_create_volume(volume_id, poolname) try: pool = self.get_or_create_pool(poolname) except exception.PoolNotFound as exc: LOG.exception(exc) pool = None self.delete_volume(volume, pool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/notification/filtering.py0000664000175000017500000000550700000000000027566 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re import oslo_messaging as om class NotificationFilter(om.NotificationFilter): """Notification Endpoint base class This class is responsible for handling incoming notifications. Depending on the priority level of the incoming, you may need to implement one or more of the following methods: .. 
code: py def audit(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def info(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def warn(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def error(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def critical(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) """ def _build_regex_dict(self, regex_list): if regex_list is None: return {} regex_mapping = {} for key, value in regex_list.items(): if isinstance(value, dict): regex_mapping[key] = self._build_regex_dict(value) else: if callable(value): regex_mapping[key] = value elif value is not None: regex_mapping[key] = re.compile(value) else: regex_mapping[key] = None return regex_mapping def _check_for_mismatch(self, data, regex): if isinstance(regex, dict): mismatch_results = [ k not in data or not self._check_for_mismatch(data[k], v) for k, v in regex.items() ] if not mismatch_results: return False return all(mismatch_results) elif callable(regex): # The filter is a callable that should return True # if there is a mismatch return regex(data) elif regex is not None and data is None: return True elif (regex is not None and isinstance(data, str) and not regex.match(data)): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/model/notification/nova.py0000664000175000017500000003456500000000000026554 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
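# --- Illustrative sketch (not part of the original module): the filter class
# above compiles every string in the filter spec to a regex (recursing into
# nested dicts) and reports a mismatch when a field is missing or fails to
# match. A reduced, flat version of the same matching idea:
#
import re
spec = {"publisher_id": re.compile(r"^volume.*"),
        "event_type": re.compile(r"volume\.create\.end")}

def matches(notification):
    return all(
        isinstance(notification.get(k), str) and rx.match(notification[k])
        for k, rx in spec.items()
    )

assert matches({"publisher_id": "volume.server1",
                "event_type": "volume.create.end"})
assert not matches({"publisher_id": "compute.server1",
                    "event_type": "volume.create.end"})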
import os_resource_classes as orc from oslo_log import log from watcher.common import exception from watcher.common import nova_helper from watcher.common import placement_helper from watcher.common import utils from watcher.decision_engine.model import element from watcher.decision_engine.model.notification import base from watcher.decision_engine.model.notification import filtering LOG = log.getLogger(__name__) class NovaNotification(base.NotificationEndpoint): def __init__(self, collector): super(NovaNotification, self).__init__(collector) self._nova = None self._placement_helper = None @property def nova(self): if self._nova is None: self._nova = nova_helper.NovaHelper() return self._nova @property def placement_helper(self): if self._placement_helper is None: self._placement_helper = placement_helper.PlacementHelper() return self._placement_helper def get_or_create_instance(self, instance_uuid, node_name=None): try: node = None if node_name: node = self.get_or_create_node(node_name) except exception.ComputeNodeNotFound: LOG.warning("Could not find compute node %(node)s for " "instance %(instance)s", dict(node=node_name, instance=instance_uuid)) try: instance = self.cluster_data_model.get_instance_by_uuid( instance_uuid) except exception.InstanceNotFound: # The instance didn't exist yet so we create a new instance object LOG.debug("New instance created: %s", instance_uuid) instance = element.Instance(uuid=instance_uuid) self.cluster_data_model.add_instance(instance) if node: self.cluster_data_model.map_instance(instance, node) return instance def update_instance(self, instance, data): n_version = float(data['nova_object.version']) instance_data = data['nova_object.data'] instance_flavor_data = instance_data['flavor']['nova_object.data'] memory_mb = instance_flavor_data['memory_mb'] num_cores = instance_flavor_data['vcpus'] disk_gb = instance_flavor_data['root_gb'] instance_metadata = data['nova_object.data']['metadata'] instance.update({ 'state': instance_data['state'], 'hostname': instance_data['host_name'], # this is the user-provided display name of the server which is not # guaranteed to be unique nor is it immutable. 
'name': instance_data['display_name'], 'memory': memory_mb, 'vcpus': num_cores, 'disk': disk_gb, 'metadata': instance_metadata, 'project_id': instance_data['tenant_id'] }) # locked was added in nova notification payload version 1.1 if n_version > 1.0: instance.update({'locked': instance_data['locked']}) try: node = self.get_or_create_node(instance_data['host']) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) # If we can't create the node, we consider the instance as unmapped node = None self.update_instance_mapping(instance, node) def update_compute_node(self, node, data): """Update the compute node using the notification data.""" node_data = data['nova_object.data'] node_state = ( element.ServiceState.OFFLINE.value if node_data['forced_down'] else element.ServiceState.ONLINE.value) node_status = ( element.ServiceState.DISABLED.value if node_data['disabled'] else element.ServiceState.ENABLED.value) disabled_reason = ( node_data['disabled_reason'] if node_data['disabled'] else None) node.update({ 'hostname': node_data['host'], 'state': node_state, 'status': node_status, 'disabled_reason': disabled_reason, }) def create_compute_node(self, uuid_or_name): """Create the computeNode node.""" try: if utils.is_uuid_like(uuid_or_name): _node = self.nova.get_compute_node_by_uuid(uuid_or_name) else: _node = self.nova.get_compute_node_by_hostname(uuid_or_name) inventories = self.placement_helper.get_inventories(_node.id) if inventories and orc.VCPU in inventories: vcpus = inventories[orc.VCPU]['total'] vcpu_reserved = inventories[orc.VCPU]['reserved'] vcpu_ratio = inventories[orc.VCPU]['allocation_ratio'] else: vcpus = _node.vcpus vcpu_reserved = 0 vcpu_ratio = 1.0 if inventories and orc.MEMORY_MB in inventories: memory_mb = inventories[orc.MEMORY_MB]['total'] memory_mb_reserved = inventories[orc.MEMORY_MB]['reserved'] memory_ratio = inventories[orc.MEMORY_MB]['allocation_ratio'] else: memory_mb = _node.memory_mb memory_mb_reserved = 0 memory_ratio = 1.0 # NOTE(licanwei): A BP support-shared-storage-resource-provider # will move DISK_GB from compute node to shared storage RP. # Here may need to be updated when the nova BP released. if inventories and orc.DISK_GB in inventories: disk_capacity = inventories[orc.DISK_GB]['total'] disk_gb_reserved = inventories[orc.DISK_GB]['reserved'] disk_ratio = inventories[orc.DISK_GB]['allocation_ratio'] else: disk_capacity = _node.local_gb disk_gb_reserved = 0 disk_ratio = 1.0 # build up the compute node. node_attributes = { # The id of the hypervisor as a UUID from version 2.53. 
"uuid": _node.id, "hostname": _node.service["host"], "memory": memory_mb, "memory_ratio": memory_ratio, "memory_mb_reserved": memory_mb_reserved, "disk": disk_capacity, "disk_gb_reserved": disk_gb_reserved, "disk_ratio": disk_ratio, "vcpus": vcpus, "vcpu_reserved": vcpu_reserved, "vcpu_ratio": vcpu_ratio, "state": _node.state, "status": _node.status, "disabled_reason": _node.service["disabled_reason"]} node = element.ComputeNode(**node_attributes) self.cluster_data_model.add_node(node) LOG.debug("New compute node mapped: %s", node.uuid) return node except Exception as exc: LOG.exception(exc) LOG.debug("Could not refresh the node %s.", uuid_or_name) raise exception.ComputeNodeNotFound(name=uuid_or_name) def get_or_create_node(self, uuid_or_name): if uuid_or_name is None: LOG.debug("Compute node UUID or name not provided: skipping") return try: if utils.is_uuid_like(uuid_or_name): return self.cluster_data_model.get_node_by_uuid(uuid_or_name) else: return self.cluster_data_model.get_node_by_name(uuid_or_name) except exception.ComputeNodeNotFound: # The node didn't exist yet so we create a new node object node = self.create_compute_node(uuid_or_name) LOG.debug("New compute node created: %s", uuid_or_name) return node def update_instance_mapping(self, instance, node): if node is None: self.cluster_data_model.add_instance(instance) LOG.debug("Instance %s not yet attached to any node: skipping", instance.uuid) return try: try: current_node = ( self.cluster_data_model.get_node_by_instance_uuid( instance.uuid)) except exception.ComputeResourceNotFound as exc: LOG.exception(exc) # If we can't create the node, # we consider the instance as unmapped current_node = None LOG.debug("Mapped node %s found", node.uuid) if current_node and node != current_node: LOG.debug("Unmapping instance %s from %s", instance.uuid, node.uuid) self.cluster_data_model.unmap_instance(instance, current_node) except exception.InstanceNotFound: # The instance didn't exist yet so we map it for the first time LOG.debug("New instance: mapping it to %s", node.uuid) finally: if node: self.cluster_data_model.map_instance(instance, node) LOG.debug("Mapped instance %s to %s", instance.uuid, node.uuid) def delete_instance(self, instance, node): try: self.cluster_data_model.delete_instance(instance, node) except Exception: LOG.info("Instance %s already deleted", instance.uuid) def delete_node(self, node): try: self.cluster_data_model.remove_node(node) except Exception: LOG.info("Node %s already deleted", node.uuid) class VersionedNotification(NovaNotification): publisher_id_regex = r'^nova-.*' def service_updated(self, payload): node_data = payload['nova_object.data'] node_name = node_data['host'] try: node = self.get_or_create_node(node_name) self.update_compute_node(node, payload) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) def service_deleted(self, payload): node_data = payload['nova_object.data'] node_name = node_data['host'] try: node = self.get_or_create_node(node_name) self.delete_node(node) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) def instance_updated(self, payload): instance_data = payload['nova_object.data'] instance_uuid = instance_data['uuid'] instance_state = instance_data['state'] node_name = instance_data.get('host') # if instance state is building, don't update data model if instance_state == 'building': return instance = self.get_or_create_instance(instance_uuid, node_name) self.update_instance(instance, payload) def instance_created(self, payload): instance_data = 
payload['nova_object.data'] instance_uuid = instance_data['uuid'] instance = element.Instance(uuid=instance_uuid) self.cluster_data_model.add_instance(instance) node_name = instance_data.get('host') if node_name: node = self.get_or_create_node(node_name) self.cluster_data_model.map_instance(instance, node) self.update_instance(instance, payload) def instance_deleted(self, payload): instance_data = payload['nova_object.data'] instance_uuid = instance_data['uuid'] node_name = instance_data.get('host') instance = self.get_or_create_instance(instance_uuid, node_name) try: node = self.get_or_create_node(instance_data['host']) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) # If we can't create the node, we consider the instance as unmapped node = None self.delete_instance(instance, node) notification_mapping = { 'instance.create.end': instance_created, 'instance.lock': instance_updated, 'instance.unlock': instance_updated, 'instance.pause.end': instance_updated, 'instance.power_off.end': instance_updated, 'instance.power_on.end': instance_updated, 'instance.resize_confirm.end': instance_updated, 'instance.restore.end': instance_updated, 'instance.resume.end': instance_updated, 'instance.shelve.end': instance_updated, 'instance.shutdown.end': instance_updated, 'instance.suspend.end': instance_updated, 'instance.unpause.end': instance_updated, 'instance.unrescue.end': instance_updated, 'instance.unshelve.end': instance_updated, 'instance.rebuild.end': instance_updated, 'instance.rescue.end': instance_updated, 'instance.update': instance_updated, 'instance.live_migration_force_complete.end': instance_updated, 'instance.live_migration_post.end': instance_updated, 'instance.delete.end': instance_deleted, 'instance.soft_delete.end': instance_deleted, 'service.create': service_updated, 'service.delete': service_deleted, 'service.update': service_updated, } @property def filter_rule(self): """Nova notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, ) def info(self, ctxt, publisher_id, event_type, payload, metadata): LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) func = self.notification_mapping.get(event_type) if func: # The nova CDM is not built until an audit is performed. if self.cluster_data_model: LOG.debug(payload) func(self, payload) else: LOG.debug('Nova CDM has not yet been built; ignoring ' 'notifications until an audit is performed.') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/planner/0000775000175000017500000000000000000000000023073 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/planner/__init__.py0000664000175000017500000000000000000000000025172 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/planner/base.py0000664000175000017500000000577500000000000024375 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
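# --- Illustrative sketch (not part of the original module): the dispatch in
# VersionedNotification.info() above is a plain dict lookup from event_type
# to a handler function; since the mapping stores the functions themselves
# (not bound methods), the call site passes self explicitly. Minimal
# standalone version of the pattern:
#
class Dispatcher(object):

    def on_created(self, payload):
        return "created %s" % payload["uuid"]

    def on_deleted(self, payload):
        return "deleted %s" % payload["uuid"]

    mapping = {"instance.create.end": on_created,
               "instance.delete.end": on_deleted}

    def info(self, event_type, payload):
        func = self.mapping.get(event_type)
        if func:
            return func(self, payload)   # unbound: self passed explicitly

assert Dispatcher().info("instance.create.end", {"uuid": "x"}) == "created x"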
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The :ref:`Watcher Planner ` is part of the :ref:`Watcher Decision Engine `. This module takes the set of :ref:`Actions ` generated by a :ref:`Strategy ` and builds the design of a workflow which defines how-to schedule in time those different :ref:`Actions ` and for each :ref:`Action ` what are the prerequisite conditions. It is important to schedule :ref:`Actions ` in time in order to prevent overload of the :ref:`Cluster ` while applying the :ref:`Action Plan `. For example, it is important not to migrate too many instances at the same time in order to avoid a network congestion which may decrease the :ref:`SLA ` for :ref:`Customers `. It is also important to schedule :ref:`Actions ` in order to avoid security issues such as denial of service on core OpenStack services. :ref:`Some default implementations are provided `, but it is possible to :ref:`develop new implementations ` which are dynamically loaded by Watcher at launch time. See :doc:`../architecture` for more details on this component. """ import abc from watcher.common.loader import loadable class BasePlanner(loadable.Loadable, metaclass=abc.ABCMeta): @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @abc.abstractmethod def schedule(self, context, audit_uuid, solution): """The planner receives a solution to schedule :param solution: A solution provided by a strategy for scheduling :type solution: :py:class:`~.BaseSolution` subclass instance :param audit_uuid: the audit uuid :type audit_uuid: str :return: Action plan with an ordered sequence of actions such that all security, dependency, and performance requirements are met. :rtype: :py:class:`watcher.objects.ActionPlan` instance """ # example: directed acyclic graph raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/planner/manager.py0000664000175000017500000000200200000000000025051 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
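# --- Illustrative sketch (not part of the original module): a concrete
# planner only has to implement BasePlanner.schedule(). The no-op below is a
# placeholder that skips action creation entirely; a real implementation, as
# in the concrete planners further down, would persist an objects.ActionPlan
# and one objects.Action row per solution action.
#
class NoopPlanner(BasePlanner):

    def schedule(self, context, audit_uuid, solution):
        # A real planner would order solution.actions and wire up their
        # 'parents' fields here; this sketch returns the solution untouched.
        return solution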
# from oslo_log import log from watcher.decision_engine.loading import default as loader LOG = log.getLogger(__name__) class PlannerManager(object): def __init__(self): self._loader = loader.DefaultPlannerLoader() @property def loader(self): return self._loader def load(self, planner_name): LOG.debug("Loading %s", planner_name) return self.loader.load(name=planner_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/planner/node_resource_consolidation.py0000664000175000017500000001343400000000000031233 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.common import exception from watcher.common import utils from watcher.decision_engine.model import element from watcher.decision_engine.planner import base from watcher import objects LOG = log.getLogger(__name__) class NodeResourceConsolidationPlanner(base.BasePlanner): """Node Resource Consolidation planner implementation This implementation preserves the original order of actions in the solution and try to parallelize actions which have the same action type. *Limitations* - This is a proof of concept that is not meant to be used in production """ def create_action(self, action_plan_id, action_type, input_parameters=None): uuid = utils.generate_uuid() action = { 'uuid': uuid, 'action_plan_id': int(action_plan_id), 'action_type': action_type, 'input_parameters': input_parameters, 'state': objects.action.State.PENDING, 'parents': None } return action def schedule(self, context, audit_id, solution): LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) action_plan = self._create_action_plan(context, audit_id, solution) actions = list(solution.actions) if len(actions) == 0: LOG.warning("The action plan is empty") action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() return action_plan node_disabled_actions = [] node_enabled_actions = [] node_migrate_actions = {} for action in actions: action_type = action.get('action_type') parameters = action.get('input_parameters') json_action = self.create_action( action_plan_id=action_plan.id, action_type=action_type, input_parameters=parameters) # classing actions if action_type == 'change_nova_service_state': if parameters.get('state') == ( element.ServiceState.DISABLED.value): node_disabled_actions.append(json_action) else: node_enabled_actions.append(json_action) elif action_type == 'migrate': source_node = parameters.get('source_node') if source_node in node_migrate_actions: node_migrate_actions[source_node].append(json_action) else: node_migrate_actions[source_node] = [json_action] else: raise exception.UnsupportedActionType( action_type=action.get("action_type")) # creating actions mig_parents = [] for action in node_disabled_actions: mig_parents.append(action['uuid']) self._create_action(context, action) enabled_parents = [] for actions in node_migrate_actions.values(): 
enabled_parents.append(actions[-1].get('uuid')) pre_action_uuid = [] for action in actions: action['parents'] = mig_parents + pre_action_uuid pre_action_uuid = [action['uuid']] self._create_action(context, action) for action in node_enabled_actions: action['parents'] = enabled_parents self._create_action(context, action) self._create_efficacy_indicators( context, action_plan.id, solution.efficacy_indicators) return action_plan def _create_action_plan(self, context, audit_id, solution): strategy = objects.Strategy.get_by_name( context, solution.strategy.name) action_plan_dict = { 'uuid': utils.generate_uuid(), 'audit_id': audit_id, 'strategy_id': strategy.id, 'state': objects.action_plan.State.RECOMMENDED, 'global_efficacy': solution.global_efficacy, } new_action_plan = objects.ActionPlan(context, **action_plan_dict) new_action_plan.create() return new_action_plan def _create_efficacy_indicators(self, context, action_plan_id, indicators): efficacy_indicators = [] for indicator in indicators: efficacy_indicator_dict = { 'uuid': utils.generate_uuid(), 'name': indicator.name, 'description': indicator.description, 'unit': indicator.unit, 'value': indicator.value, 'action_plan_id': action_plan_id, } new_efficacy_indicator = objects.EfficacyIndicator( context, **efficacy_indicator_dict) new_efficacy_indicator.create() efficacy_indicators.append(new_efficacy_indicator) return efficacy_indicators def _create_action(self, context, _action): try: LOG.debug("Creating the %s in the Watcher database", _action.get("action_type")) new_action = objects.Action(context, **_action) new_action.create() return new_action except Exception as exc: LOG.exception(exc) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/planner/weight.py0000664000175000017500000002013200000000000024732 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Authors: Vincent Francoise # Alexander Chadin # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import networkx as nx from oslo_config import cfg from oslo_log import log from watcher.common import utils from watcher.decision_engine.planner import base from watcher import objects LOG = log.getLogger(__name__) class WeightPlanner(base.BasePlanner): """Weight planner implementation This implementation builds actions with parents in accordance with weights. Set of actions having a higher weight will be scheduled before the other ones. There are two config options to configure: action_weights and parallelization. *Limitations* - This planner requires to have action_weights and parallelization configs tuned well. 
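# --- Illustrative sketch (not part of the original module): the consolidation
# planner above wires its DAG purely through the 'parents' field: every
# migrate action lists all "disable node" actions as parents plus the
# previous migration from the same source node (serializing per host), and
# every "enable node" action waits on the last migration of every host. The
# same wiring, reduced to plain dicts with made-up uuids:
#
disable = [{"uuid": "disable-1", "parents": []}]
migrations = {"host1": [{"uuid": "mig-1"}, {"uuid": "mig-2"}]}
enable = [{"uuid": "enable-1"}]

mig_parents = [a["uuid"] for a in disable]
for actions in migrations.values():
    prev = []
    for action in actions:
        action["parents"] = mig_parents + prev
        prev = [action["uuid"]]

enable_parents = [actions[-1]["uuid"] for actions in migrations.values()]
for action in enable:
    action["parents"] = enable_parents

assert migrations["host1"][1]["parents"] == ["disable-1", "mig-1"]
assert enable[0]["parents"] == ["mig-2"]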
""" def __init__(self, config): super(WeightPlanner, self).__init__(config) action_weights = { 'nop': 70, 'volume_migrate': 60, 'change_nova_service_state': 50, 'sleep': 40, 'migrate': 30, 'resize': 20, 'turn_host_to_acpi_s3_state': 10, 'change_node_power_state': 9, } parallelization = { 'turn_host_to_acpi_s3_state': 2, 'resize': 2, 'migrate': 2, 'sleep': 1, 'change_nova_service_state': 1, 'nop': 1, 'change_node_power_state': 2, 'volume_migrate': 2 } @classmethod def get_config_opts(cls): return [ cfg.DictOpt( 'weights', help="These weights are used to schedule the actions. " "Action Plan will be build in accordance with sets of " "actions ordered by descending weights." "Two action types cannot have the same weight. ", default=cls.action_weights), cfg.DictOpt( 'parallelization', help="Number of actions to be run in parallel on a per " "action type basis.", default=cls.parallelization), ] @staticmethod def chunkify(lst, n): """Yield successive n-sized chunks from lst.""" n = int(n) if n < 1: # Just to make sure the number is valid n = 1 # Split a flat list in a list of chunks of size n. # e.g. chunkify([0, 1, 2, 3, 4], 2) -> [[0, 1], [2, 3], [4]] for i in range(0, len(lst), n): yield lst[i:i + n] def compute_action_graph(self, sorted_weighted_actions): reverse_weights = {v: k for k, v in self.config.weights.items()} # leaf_groups contains a list of list of nodes called groups # each group is a set of nodes from which a future node will # branch off (parent nodes). # START --> migrate-1 --> migrate-3 # \ \--> resize-1 --> FINISH # \--> migrate-2 -------------/ # In the above case migrate-1 will be the only member of the leaf # group that migrate-3 will use as parent group, whereas # resize-1 will have both migrate-2 and migrate-3 in its # parent/leaf group leaf_groups = [] action_graph = nx.DiGraph() # We iterate through each action type category (sorted by weight) to # insert them in a Directed Acyclic Graph for idx, (weight, actions) in enumerate(sorted_weighted_actions): action_chunks = self.chunkify( actions, self.config.parallelization[reverse_weights[weight]]) # We split the actions into chunks/layers that will have to be # spread across all the available branches of the graph for chunk_idx, actions_chunk in enumerate(action_chunks): for action in actions_chunk: action_graph.add_node(action) # all other actions parent_nodes = [] if not idx and not chunk_idx: parent_nodes = [] elif leaf_groups: parent_nodes = leaf_groups for parent_node in parent_nodes: action_graph.add_edge(parent_node, action) action.parents.append(parent_node.uuid) if leaf_groups: leaf_groups = [] leaf_groups.extend([a for a in actions_chunk]) return action_graph def schedule(self, context, audit_id, solution): LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) action_plan = self.create_action_plan(context, audit_id, solution) sorted_weighted_actions = self.get_sorted_actions_by_weight( context, action_plan, solution) action_graph = self.compute_action_graph(sorted_weighted_actions) self._create_efficacy_indicators( context, action_plan.id, solution.efficacy_indicators) if len(action_graph.nodes()) == 0: LOG.warning("The action plan is empty") action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() self.create_scheduled_actions(action_graph) return action_plan def get_sorted_actions_by_weight(self, context, action_plan, solution): # We need to make them immutable to add them to the graph action_objects = list([ objects.Action( context, uuid=utils.generate_uuid(), parents=[], 
action_plan_id=action_plan.id, **a) for a in solution.actions]) # This is a dict of list with each being a weight and the list being # all the actions associated to this weight weighted_actions = collections.defaultdict(list) for action in action_objects: action_weight = self.config.weights[action.action_type] weighted_actions[action_weight].append(action) return reversed(sorted(weighted_actions.items(), key=lambda x: x[0])) def create_scheduled_actions(self, graph): for action in graph.nodes(): LOG.debug("Creating the %s in the Watcher database", action.action_type) try: action.create() except Exception as exc: LOG.exception(exc) raise def create_action_plan(self, context, audit_id, solution): strategy = objects.Strategy.get_by_name( context, solution.strategy.name) action_plan_dict = { 'uuid': utils.generate_uuid(), 'audit_id': audit_id, 'strategy_id': strategy.id, 'state': objects.action_plan.State.RECOMMENDED, 'global_efficacy': solution.global_efficacy, } new_action_plan = objects.ActionPlan(context, **action_plan_dict) new_action_plan.create() return new_action_plan def _create_efficacy_indicators(self, context, action_plan_id, indicators): efficacy_indicators = [] for indicator in indicators: efficacy_indicator_dict = { 'uuid': utils.generate_uuid(), 'name': indicator.name, 'description': indicator.description, 'unit': indicator.unit, 'value': indicator.value, 'action_plan_id': action_plan_id, } new_efficacy_indicator = objects.EfficacyIndicator( context, **efficacy_indicator_dict) new_efficacy_indicator.create() efficacy_indicators.append(new_efficacy_indicator) return efficacy_indicators ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/planner/workload_stabilization.py0000664000175000017500000002567000000000000030235 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from oslo_config import cfg from oslo_log import log from watcher.common import clients from watcher.common import exception from watcher.common import nova_helper from watcher.common import utils from watcher.decision_engine.planner import base from watcher import objects LOG = log.getLogger(__name__) class WorkloadStabilizationPlanner(base.BasePlanner): """Workload Stabilization planner implementation This implementation comes with basic rules with a set of action types that are weighted. An action having a lower weight will be scheduled before the other ones. The set of action types can be specified by 'weights' in the ``watcher.conf``. You need to associate a different weight to all available actions into the configuration file, otherwise you will get an error when the new action will be referenced in the solution produced by a strategy. 
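# --- Illustrative sketch (not part of the original module): the weight
# planner above sorts actions by descending weight, splits each weight class
# into chunks of at most parallelization[action_type] members, and makes each
# action of a chunk depend on every action of the previous chunk, so the
# members of one chunk may run in parallel. Layer wiring with strings instead
# of Action objects:
#
def chunkify(lst, n):
    n = max(int(n), 1)
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

layers = list(chunkify(["mig-1", "mig-2", "mig-3"], 2))
assert layers == [["mig-1", "mig-2"], ["mig-3"]]

parents = {}
previous = []
for layer in layers:
    for action in layer:
        parents[action] = list(previous)
    previous = layer

assert parents == {"mig-1": [], "mig-2": [], "mig-3": ["mig-1", "mig-2"]}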
*Limitations* - This is a proof of concept that is not meant to be used in production """ def __init__(self, config): super(WorkloadStabilizationPlanner, self).__init__(config) self._osc = clients.OpenStackClients() @property def osc(self): return self._osc weights_dict = { 'turn_host_to_acpi_s3_state': 0, 'resize': 1, 'migrate': 2, 'sleep': 3, 'change_nova_service_state': 4, 'nop': 5, } @classmethod def get_config_opts(cls): return [ cfg.DictOpt( 'weights', help="These weights are used to schedule the actions", default=cls.weights_dict), ] def create_action(self, action_plan_id, action_type, input_parameters=None): uuid = utils.generate_uuid() action = { 'uuid': uuid, 'action_plan_id': int(action_plan_id), 'action_type': action_type, 'input_parameters': input_parameters, 'state': objects.action.State.PENDING, 'parents': None } return action def load_child_class(self, child_name): for c in BaseActionValidator.__subclasses__(): if child_name == c.action_name: return c() return None def schedule(self, context, audit_id, solution): LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) weights = self.config.weights action_plan = self._create_action_plan(context, audit_id, solution) actions = list(solution.actions) to_schedule = [] for action in actions: json_action = self.create_action( action_plan_id=action_plan.id, action_type=action.get('action_type'), input_parameters=action.get('input_parameters')) to_schedule.append((weights[action.get('action_type')], json_action)) self._create_efficacy_indicators( context, action_plan.id, solution.efficacy_indicators) # scheduling scheduled = sorted(to_schedule, key=lambda weight: (weight[0]), reverse=True) if len(scheduled) == 0: LOG.warning("The action plan is empty") action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() else: resource_action_map = {} scheduled_actions = [x[1] for x in scheduled] for action in scheduled_actions: a_type = action['action_type'] if a_type != 'turn_host_to_acpi_s3_state': plugin_action = self.load_child_class( action.get("action_type")) if not plugin_action: raise exception.UnsupportedActionType( action_type=action.get("action_type")) db_action = self._create_action(context, action) parents = plugin_action.validate_parents( resource_action_map, action) if parents: db_action.parents = parents db_action.save() # if we have an action that will make host unreachable, we need # to complete all actions (resize and migration type) # related to the host. # Note(alexchadin): turn_host_to_acpi_s3_state doesn't # actually exist. Placed code shows relations between # action types. # TODO(alexchadin): add turn_host_to_acpi_s3_state action type. 
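                # For illustration, resource_action_map at this point maps
                # resource IDs to (action UUID, action type) tuples, as
                # built by the validators' _mapping() helper further below
                # (names and UUIDs here are hypothetical):
                #
                #     {
                #         '<instance-uuid>': [('<action-1-uuid>', 'migrate')],
                #         '<source-host>': [('<action-1-uuid>', 'migrate'),
                #                           ('<action-2-uuid>', 'resize')],
                #     }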
else: host_to_acpi_s3 = action['input_parameters']['resource_id'] host_actions = resource_action_map.get(host_to_acpi_s3) action_parents = [] if host_actions: resize_actions = [x[0] for x in host_actions if x[1] == 'resize'] migrate_actions = [x[0] for x in host_actions if x[1] == 'migrate'] resize_migration_parents = [ x.parents for x in [objects.Action.get_by_uuid(context, resize_action) for resize_action in resize_actions]] # resize_migration_parents should be one level list resize_migration_parents = [ parent for sublist in resize_migration_parents for parent in sublist] action_parents.extend([uuid for uuid in resize_actions]) action_parents.extend([uuid for uuid in migrate_actions if uuid not in resize_migration_parents]) db_action = self._create_action(context, action) db_action.parents = action_parents db_action.save() return action_plan def _create_action_plan(self, context, audit_id, solution): strategy = objects.Strategy.get_by_name( context, solution.strategy.name) action_plan_dict = { 'uuid': utils.generate_uuid(), 'audit_id': audit_id, 'strategy_id': strategy.id, 'state': objects.action_plan.State.RECOMMENDED, 'global_efficacy': solution.global_efficacy, } new_action_plan = objects.ActionPlan(context, **action_plan_dict) new_action_plan.create() return new_action_plan def _create_efficacy_indicators(self, context, action_plan_id, indicators): efficacy_indicators = [] for indicator in indicators: efficacy_indicator_dict = { 'uuid': utils.generate_uuid(), 'name': indicator.name, 'description': indicator.description, 'unit': indicator.unit, 'value': indicator.value, 'action_plan_id': action_plan_id, } new_efficacy_indicator = objects.EfficacyIndicator( context, **efficacy_indicator_dict) new_efficacy_indicator.create() efficacy_indicators.append(new_efficacy_indicator) return efficacy_indicators def _create_action(self, context, _action): try: LOG.debug("Creating the %s in the Watcher database", _action.get("action_type")) new_action = objects.Action(context, **_action) new_action.create() return new_action except Exception as exc: LOG.exception(exc) raise class BaseActionValidator(object): action_name = None def __init__(self): super(BaseActionValidator, self).__init__() self._osc = None @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @abc.abstractmethod def validate_parents(self, resource_action_map, action): raise NotImplementedError() def _mapping(self, resource_action_map, resource_id, action_uuid, action_type): if resource_id not in resource_action_map: resource_action_map[resource_id] = [(action_uuid, action_type,)] else: resource_action_map[resource_id].append((action_uuid, action_type,)) class MigrationActionValidator(BaseActionValidator): action_name = "migrate" def validate_parents(self, resource_action_map, action): instance_uuid = action['input_parameters']['resource_id'] host_name = action['input_parameters']['source_node'] self._mapping(resource_action_map, instance_uuid, action['uuid'], 'migrate') self._mapping(resource_action_map, host_name, action['uuid'], 'migrate') class ResizeActionValidator(BaseActionValidator): action_name = "resize" def validate_parents(self, resource_action_map, action): nova = nova_helper.NovaHelper(osc=self.osc) instance_uuid = action['input_parameters']['resource_id'] parent_actions = resource_action_map.get(instance_uuid) host_of_instance = nova.get_hostname( nova.get_instance_by_uuid(instance_uuid)[0]) self._mapping(resource_action_map, host_of_instance, action['uuid'], 'resize') if 
parent_actions: return [x[0] for x in parent_actions] else: return [] class ChangeNovaServiceStateActionValidator(BaseActionValidator): action_name = "change_nova_service_state" def validate_parents(self, resource_action_map, action): host_name = action['input_parameters']['resource_id'] self._mapping(resource_action_map, host_name, action['uuid'], 'change_nova_service_state') return [] class SleepActionValidator(BaseActionValidator): action_name = "sleep" def validate_parents(self, resource_action_map, action): return [] class NOPActionValidator(BaseActionValidator): action_name = "nop" def validate_parents(self, resource_action_map, action): return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/rpcapi.py0000664000175000017500000000435600000000000023274 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from watcher.common import exception from watcher.common import service from watcher.common import service_manager from watcher.common import utils from watcher import conf CONF = conf.CONF class DecisionEngineAPI(service.Service): def __init__(self): super(DecisionEngineAPI, self).__init__(DecisionEngineAPIManager) def trigger_audit(self, context, audit_uuid=None): if not utils.is_uuid_like(audit_uuid): raise exception.InvalidUuidOrName(name=audit_uuid) self.conductor_client.cast( context, 'trigger_audit', audit_uuid=audit_uuid) def get_strategy_info(self, context, strategy_name): return self.conductor_client.call( context, 'get_strategy_info', strategy_name=strategy_name) def get_data_model_info(self, context, data_model_type, audit): return self.conductor_client.call( context, 'get_data_model_info', data_model_type=data_model_type, audit=audit) class DecisionEngineAPIManager(service_manager.ServiceManager): @property def service_name(self): return None @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_decision_engine.publisher_id @property def conductor_topic(self): return CONF.watcher_decision_engine.conductor_topic @property def notification_topics(self): return [] @property def conductor_endpoints(self): return [] @property def notification_endpoints(self): return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scheduling.py0000664000175000017500000001037500000000000024141 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

import eventlet
from oslo_log import log

from watcher.common import context
from watcher.common import exception
from watcher.common import scheduling
from watcher.decision_engine.model.collector import manager
from watcher import conf
from watcher import objects

LOG = log.getLogger(__name__)

CONF = conf.CONF


class DecisionEngineSchedulingService(scheduling.BackgroundSchedulerService):
    def __init__(self, gconfig=None, **options):
        # Default to an empty dict without discarding a caller-provided
        # scheduler configuration.
        gconfig = gconfig or {}
        super(DecisionEngineSchedulingService, self).__init__(
            gconfig, **options)
        self.collector_manager = manager.CollectorManager()

    @property
    def collectors(self):
        return self.collector_manager.get_collectors()

    def add_sync_jobs(self):
        for name, collector in self.collectors.items():
            timed_task = self._wrap_collector_sync_with_timeout(
                collector, name)
            self.add_job(timed_task,
                         trigger='interval',
                         seconds=collector.config.period,
                         next_run_time=datetime.datetime.now())

    def _as_timed_sync_func(self, sync_func, name, timeout):
        def _timed_sync():
            with eventlet.Timeout(
                timeout,
                exception=exception.ClusterDataModelCollectionError(cdm=name)
            ):
                sync_func()

        return _timed_sync

    def _wrap_collector_sync_with_timeout(self, collector, name):
        """Add an execution timeout constraint on a function"""
        timeout = collector.config.period

        def _sync():
            try:
                timed_sync = self._as_timed_sync_func(
                    collector.synchronize, name, timeout)
                timed_sync()
            except Exception as exc:
                LOG.exception(exc)
                collector.set_cluster_data_model_as_stale()

        return _sync

    def add_checkstate_job(self):
        # 30 minutes interval
        interval = CONF.watcher_decision_engine.check_periodic_interval
        ap_manager = objects.action_plan.StateManager()
        if CONF.watcher_decision_engine.action_plan_expiry != 0:
            self.add_job(ap_manager.check_expired, 'interval',
                         args=[context.make_context()],
                         seconds=interval,
                         next_run_time=datetime.datetime.now())

    def cancel_ongoing_audits(self):
        audit_filters = {
            'audit_type': objects.audit.AuditType.ONESHOT.value,
            'state': objects.audit.State.ONGOING,
            'hostname': CONF.host
        }
        local_context = context.make_context()
        ongoing_audits = objects.Audit.list(
            local_context,
            filters=audit_filters)
        for audit in ongoing_audits:
            audit.state = objects.audit.State.CANCELLED
            audit.save()
            LOG.info("Audit %(uuid)s has been cancelled because it was in "
                     "%(state)s state when Decision Engine had been stopped "
                     "on %(hostname)s host.",
                     {'uuid': audit.uuid,
                      'state': objects.audit.State.ONGOING,
                      'hostname': audit.hostname})

    def start(self):
        """Start service."""
        self.add_sync_jobs()
        self.add_checkstate_job()
        self.cancel_ongoing_audits()
        super(DecisionEngineSchedulingService, self).start()

    def stop(self):
        """Stop service."""
        self.shutdown()

    def wait(self):
        """Wait for service to complete."""

    def reset(self):
        """Reset service.

        Called in case service running in daemon mode receives SIGHUP.
""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/scope/0000775000175000017500000000000000000000000022545 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scope/__init__.py0000664000175000017500000000000000000000000024644 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scope/baremetal.py0000664000175000017500000000420600000000000025055 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.scope import base class BaremetalScope(base.BaseScope): """Baremetal Audit Scope Handler""" def __init__(self, scope, config, osc=None): super(BaremetalScope, self).__init__(scope, config) self._osc = osc def exclude_resources(self, resources, **kwargs): nodes_to_exclude = kwargs.get('nodes') for resource in resources: if 'ironic_nodes' in resource: nodes_to_exclude.extend( [node['uuid'] for node in resource['ironic_nodes']]) def remove_nodes_from_model(self, nodes_to_exclude, cluster_model): for node_uuid in nodes_to_exclude: node = cluster_model.get_node_by_uuid(node_uuid) cluster_model.remove_node(node) def get_scoped_model(self, cluster_model): """Leave only nodes and instances proposed in the audit scope""" if not cluster_model: return None nodes_to_exclude = [] baremetal_scope = [] if not self.scope: return cluster_model for scope in self.scope: baremetal_scope = scope.get('baremetal') if baremetal_scope: break if not baremetal_scope: return cluster_model for rule in baremetal_scope: if 'exclude' in rule: self.exclude_resources( rule['exclude'], nodes=nodes_to_exclude) self.remove_nodes_from_model(nodes_to_exclude, cluster_model) return cluster_model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scope/base.py0000664000175000017500000000225000000000000024030 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc from watcher.common import context class BaseScope(object, metaclass=abc.ABCMeta): """A base class for Scope mechanism Child of this class is called when audit launches strategy. This strategy requires Cluster Data Model which can be segregated to achieve audit scope. """ def __init__(self, scope, config): self.ctx = context.make_context() self.scope = scope self.config = config @abc.abstractmethod def get_scoped_model(self, cluster_model): """Leave only nodes and instances proposed in the audit scope""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scope/compute.py0000664000175000017500000002215100000000000024574 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.scope import base LOG = log.getLogger(__name__) class ComputeScope(base.BaseScope): """Compute Audit Scope Handler""" def __init__(self, scope, config, osc=None): super(ComputeScope, self).__init__(scope, config) self._osc = osc self.wrapper = nova_helper.NovaHelper(osc=self._osc) def remove_instance(self, cluster_model, instance, node_uuid): node = cluster_model.get_node_by_uuid(node_uuid) cluster_model.delete_instance(instance, node) def update_exclude_instance(self, cluster_model, instance, node_uuid): node = cluster_model.get_node_by_uuid(node_uuid) cluster_model.unmap_instance(instance, node) instance.update({"watcher_exclude": True}) cluster_model.map_instance(instance, node) def _check_wildcard(self, aggregate_list): if '*' in aggregate_list: if len(aggregate_list) == 1: return True else: raise exception.WildcardCharacterIsUsed( resource="host aggregates") return False def _collect_aggregates(self, host_aggregates, compute_nodes): aggregate_list = self.wrapper.get_aggregate_list() aggregate_ids = [aggregate['id'] for aggregate in host_aggregates if 'id' in aggregate] aggregate_names = [aggregate['name'] for aggregate in host_aggregates if 'name' in aggregate] include_all_nodes = any(self._check_wildcard(field) for field in (aggregate_ids, aggregate_names)) for aggregate in aggregate_list: if (aggregate.id in aggregate_ids or aggregate.name in aggregate_names or include_all_nodes): compute_nodes.extend(aggregate.hosts) def _collect_zones(self, availability_zones, allowed_nodes): service_list = self.wrapper.get_service_list() zone_names = [zone['name'] for zone in availability_zones] include_all_nodes = False if '*' in zone_names: if len(zone_names) == 1: include_all_nodes = True else: raise exception.WildcardCharacterIsUsed( resource="availability zones") for service in service_list: if service.zone in zone_names or include_all_nodes: allowed_nodes.extend(service.host) def exclude_resources(self, resources, **kwargs): instances_to_exclude = kwargs.get('instances') nodes_to_exclude = kwargs.get('nodes') 
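        # For illustration, ``resources`` here is the list found under an
        # 'exclude' rule of a compute audit scope; a hypothetical value
        # (names and UUIDs are placeholders) could look like:
        #
        #     [{'instances': [{'uuid': '<instance-uuid>'}]},
        #      {'compute_nodes': [{'name': '<host-name>'}]},
        #      {'host_aggregates': [{'name': '<aggregate-name>'}]},
        #      {'instance_metadata': [{'<meta-key>': '<meta-value>'}]},
        #      {'projects': [{'uuid': '<project-uuid>'}]}]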
instance_metadata = kwargs.get('instance_metadata') projects_to_exclude = kwargs.get('projects') for resource in resources: if 'instances' in resource: instances_to_exclude.extend( [instance['uuid'] for instance in resource['instances']]) elif 'compute_nodes' in resource: nodes_to_exclude.extend( [host['name'] for host in resource['compute_nodes']]) elif 'host_aggregates' in resource: prohibited_nodes = [] self._collect_aggregates(resource['host_aggregates'], prohibited_nodes) nodes_to_exclude.extend(prohibited_nodes) elif 'instance_metadata' in resource: instance_metadata.extend( [metadata for metadata in resource['instance_metadata']]) elif 'projects' in resource: projects_to_exclude.extend( [project['uuid'] for project in resource['projects']]) def remove_nodes_from_model(self, nodes_to_remove, cluster_model): for node_name in nodes_to_remove: node = cluster_model.get_node_by_name(node_name) instances = cluster_model.get_node_instances(node) for instance in instances: self.remove_instance(cluster_model, instance, node.uuid) cluster_model.remove_node(node) def update_exclude_instance_in_model( self, instances_to_exclude, cluster_model): for instance_uuid in instances_to_exclude: try: node_uuid = cluster_model.get_node_by_instance_uuid( instance_uuid).uuid except exception.ComputeResourceNotFound: LOG.warning("The following instance %s cannot be found. " "It might be deleted from CDM along with node" " instance was hosted on.", instance_uuid) continue self.update_exclude_instance( cluster_model, cluster_model.get_instance_by_uuid(instance_uuid), node_uuid) def exclude_instances_with_given_metadata( self, instance_metadata, cluster_model, instances_to_remove): metadata_dict = { key: val for d in instance_metadata for key, val in d.items()} instances = cluster_model.get_all_instances() for uuid, instance in instances.items(): metadata = instance.metadata common_metadata = set(metadata_dict) & set(metadata) if common_metadata and len(common_metadata) == len(metadata_dict): for key, value in metadata_dict.items(): if str(value).lower() == str(metadata.get(key)).lower(): instances_to_remove.add(uuid) def exclude_instances_with_given_project( self, projects_to_exclude, cluster_model, instances_to_exclude): all_instances = cluster_model.get_all_instances() for uuid, instance in all_instances.items(): if instance.project_id in projects_to_exclude: instances_to_exclude.add(uuid) def get_scoped_model(self, cluster_model): """Leave only nodes and instances proposed in the audit scope""" if not cluster_model: return None allowed_nodes = [] nodes_to_exclude = [] nodes_to_remove = set() instances_to_exclude = [] instance_metadata = [] projects_to_exclude = [] compute_scope = [] found_nothing_flag = False model_hosts = [n.hostname for n in cluster_model.get_all_compute_nodes().values()] if not self.scope: return cluster_model for scope in self.scope: compute_scope = scope.get('compute') if compute_scope: break if not compute_scope: return cluster_model for rule in compute_scope: if 'host_aggregates' in rule: self._collect_aggregates(rule['host_aggregates'], allowed_nodes) if not allowed_nodes: found_nothing_flag = True elif 'availability_zones' in rule: self._collect_zones(rule['availability_zones'], allowed_nodes) if not allowed_nodes: found_nothing_flag = True elif 'exclude' in rule: self.exclude_resources( rule['exclude'], instances=instances_to_exclude, nodes=nodes_to_exclude, instance_metadata=instance_metadata, projects=projects_to_exclude) instances_to_exclude = set(instances_to_exclude) if 
allowed_nodes: nodes_to_remove = set(model_hosts) - set(allowed_nodes) # This branch means user set host_aggregates and/or availability_zones # but can't find any nodes, so we should remove all nodes. elif found_nothing_flag: nodes_to_remove = set(model_hosts) nodes_to_remove.update(nodes_to_exclude) self.remove_nodes_from_model(nodes_to_remove, cluster_model) if instance_metadata and self.config.check_optimize_metadata: self.exclude_instances_with_given_metadata( instance_metadata, cluster_model, instances_to_exclude) if projects_to_exclude: self.exclude_instances_with_given_project( projects_to_exclude, cluster_model, instances_to_exclude) self.update_exclude_instance_in_model(instances_to_exclude, cluster_model) return cluster_model ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scope/storage.py0000664000175000017500000001442200000000000024566 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.scope import base class StorageScope(base.BaseScope): """Storage Audit Scope Handler""" def __init__(self, scope, config, osc=None): super(StorageScope, self).__init__(scope, config) self._osc = osc self.wrapper = cinder_helper.CinderHelper(osc=self._osc) def _collect_vtype(self, volume_types, allowed_nodes): service_list = self.wrapper.get_storage_node_list() vt_names = [volume_type['name'] for volume_type in volume_types] include_all_nodes = False if '*' in vt_names: if len(vt_names) == 1: include_all_nodes = True else: raise exception.WildcardCharacterIsUsed( resource="volume_types") for service in service_list: if include_all_nodes: allowed_nodes.append(service.host) continue backend = service.host.split('@')[1] v_types = self.wrapper.get_volume_type_by_backendname( backend) for volume_type in v_types: if volume_type in vt_names: # Note(adisky): It can generate duplicate values # but it will later converted to set allowed_nodes.append(service.host) def _collect_zones(self, availability_zones, allowed_nodes): service_list = self.wrapper.get_storage_node_list() zone_names = [zone['name'] for zone in availability_zones] include_all_nodes = False if '*' in zone_names: if len(zone_names) == 1: include_all_nodes = True else: raise exception.WildcardCharacterIsUsed( resource="availability zones") for service in service_list: if service.zone in zone_names or include_all_nodes: allowed_nodes.append(service.host) def exclude_resources(self, resources, **kwargs): pools_to_exclude = kwargs.get('pools') volumes_to_exclude = kwargs.get('volumes') projects_to_exclude = kwargs.get('projects') for resource in resources: if 'storage_pools' in resource: pools_to_exclude.extend( [storage_pool['name'] for storage_pool in resource['storage_pools']]) elif 'volumes' in resource: volumes_to_exclude.extend( [volume['uuid'] for volume in resource['volumes']]) elif 'projects' in resource: 
                projects_to_exclude.extend(
                    [project['uuid'] for project in resource['projects']])

    def exclude_pools(self, pools_to_exclude, cluster_model):
        for pool_name in pools_to_exclude:
            pool = cluster_model.get_pool_by_pool_name(pool_name)
            volumes = cluster_model.get_pool_volumes(pool)
            for volume in volumes:
                cluster_model.remove_volume(volume)
            cluster_model.remove_pool(pool)

    def exclude_volumes(self, volumes_to_exclude, cluster_model):
        for volume_uuid in volumes_to_exclude:
            volume = cluster_model.get_volume_by_uuid(volume_uuid)
            cluster_model.remove_volume(volume)

    def exclude_projects(self, projects_to_exclude, cluster_model):
        all_volumes = cluster_model.get_all_volumes()
        for volume_uuid in all_volumes:
            volume = all_volumes.get(volume_uuid)
            if volume.project_id in projects_to_exclude:
                cluster_model.remove_volume(volume)

    def remove_nodes_from_model(self, nodes_to_remove, cluster_model):
        for hostname in nodes_to_remove:
            node = cluster_model.get_node_by_name(hostname)
            pools = cluster_model.get_node_pools(node)
            for pool in pools:
                volumes = cluster_model.get_pool_volumes(pool)
                for volume in volumes:
                    cluster_model.remove_volume(volume)
                cluster_model.remove_pool(pool)
            cluster_model.remove_node(node)

    def get_scoped_model(self, cluster_model):
        """Leave only nodes, pools and volumes proposed in the audit scope"""
        if not cluster_model:
            return None

        allowed_nodes = []
        nodes_to_remove = set()
        volumes_to_exclude = []
        projects_to_exclude = []
        pools_to_exclude = []
        model_hosts = list(cluster_model.get_all_storage_nodes().keys())
        storage_scope = []

        for scope in self.scope:
            storage_scope = scope.get('storage')
            if storage_scope:
                break

        if not storage_scope:
            return cluster_model

        for rule in storage_scope:
            if 'volume_types' in rule:
                self._collect_vtype(rule['volume_types'], allowed_nodes)
            elif 'availability_zones' in rule:
                self._collect_zones(rule['availability_zones'],
                                    allowed_nodes)
            elif 'exclude' in rule:
                self.exclude_resources(
                    rule['exclude'], pools=pools_to_exclude,
                    volumes=volumes_to_exclude,
                    projects=projects_to_exclude)

        if allowed_nodes:
            nodes_to_remove = set(model_hosts) - set(allowed_nodes)

        self.remove_nodes_from_model(nodes_to_remove, cluster_model)
        self.exclude_pools(pools_to_exclude, cluster_model)
        self.exclude_volumes(volumes_to_exclude, cluster_model)
        self.exclude_projects(projects_to_exclude, cluster_model)

        return cluster_model
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/scoring/0000775000175000017500000000000000000000000023100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scoring/__init__.py0000664000175000017500000000000000000000000025177 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scoring/base.py0000664000175000017500000001036500000000000024371 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 Intel
#
# Authors: Tomasz Kaczynski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from watcher.common.loader import loadable class ScoringEngine(loadable.Loadable, metaclass=abc.ABCMeta): """A base class for all the Scoring Engines. A Scoring Engine is an instance of a data model, to which the learning data was applied. Please note that this class contains non-static and non-class methods by design, so that it's easy to create multiple Scoring Engine instances using a single class (possibly configured differently). """ @abc.abstractmethod def get_name(self): """Returns the name of the Scoring Engine. The name should be unique across all Scoring Engines. :return: A Scoring Engine name :rtype: str """ @abc.abstractmethod def get_description(self): """Returns the description of the Scoring Engine. The description might contain any human readable information, which might be useful for Strategy developers planning to use this Scoring Engine. It will be also visible in the Watcher API and CLI. :return: A Scoring Engine description :rtype: str """ @abc.abstractmethod def get_metainfo(self): """Returns the metadata information about Scoring Engine. The metadata might contain a machine-friendly (e.g. in JSON format) information needed to use this Scoring Engine. For example, some Scoring Engines require to pass the array of features in particular order to be able to calculate the score value. This order can be defined in metadata and used in Strategy. :return: A Scoring Engine metadata :rtype: str """ @abc.abstractmethod def calculate_score(self, features): """Calculates a score value based on arguments passed. Scoring Engines might be very different to each other. They might solve different problems or use different algorithms or frameworks internally. To enable this kind of flexibility, the method takes only one argument (string) and produces the results in the same format (string). The consumer of the Scoring Engine is ultimately responsible for providing the right arguments and parsing the result. :param features: Input data for Scoring Engine :type features: str :return: A score result :rtype: str """ @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] class ScoringEngineContainer(loadable.Loadable, metaclass=abc.ABCMeta): """A base class for all the Scoring Engines Containers. A Scoring Engine Container is an abstraction which allows to plugin multiple Scoring Engines as a single Stevedore plugin. This enables some more advanced scenarios like dynamic reloading of Scoring Engine implementations without having to restart any Watcher services. """ @classmethod @abc.abstractmethod def get_scoring_engine_list(self): """Returns a list of Scoring Engine instances. 
        :return: A list of Scoring Engine instances
        :rtype: :class: `~.scoring_engine.ScoringEngine`
        """

    @classmethod
    def get_config_opts(cls):
        """Defines the configuration options to be associated to this loadable

        :return: A list of configuration options relative to this Loadable
        :rtype: list of :class:`oslo_config.cfg.Opt` instances
        """
        return []
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scoring/dummy_scorer.py0000664000175000017500000001456700000000000026173 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 Intel
#
# Authors: Tomasz Kaczynski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import units

from watcher._i18n import _
from watcher.decision_engine.scoring import base

LOG = log.getLogger(__name__)


class DummyScorer(base.ScoringEngine):
    """Sample Scoring Engine implementing simplified workload classification.

    Typically a scoring engine would be implemented using machine learning
    techniques. For example, for a workload classification problem the
    solution could consist of the following steps:

    1. Define a problem to solve: we want to detect the workload on the
       machine based on the collected metrics like power consumption,
       temperature, CPU load, memory usage, disk usage, network usage, etc.
    2. The workloads could be predefined, e.g. IDLE, CPU-INTENSIVE,
       MEMORY-INTENSIVE, IO-BOUND, ... Or we could let the ML algorithm
       find the workloads based on the learning data provided. The decision
       here determines the learning algorithm used (supervised vs.
       non-supervised learning).
    3. Collect metrics from sample servers (learning data).
    4. Define the analytical model, pick ML framework and algorithm.
    5. Apply learning data to the data model. Once taught, the data model
       becomes a scoring engine and can start doing predictions or
       classifications.
    6. Wrap up the scoring engine with a class like this one, so it has a
       standard interface and can be used inside Watcher.

    This class is a greatly simplified version of the above model. The goal
    is to provide an example of how such a class could be implemented and
    used in Watcher, without adding additional dependencies like machine
    learning frameworks (which can be quite heavy) or over-complicating
    its internal implementation, which can distract from looking at the
    overall picture.

    That said, this class implements a workload classification "manually"
    (in plain python code) and is not intended to be used in production.
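
    As a hedged usage sketch (the feature values below are arbitrary),
    calling ``calculate_score('[95, 8589934592, 1073741824, 0, 0, 0, 0,
    0, 0]')`` returns ``'[1, 0.0, 100.0, 0.0, 0.0]'``, i.e. a
    CPU-intensive workload detected with 100% probability, since the
    first feature (processor time %) is above the 80% threshold used in
    the classification logic below.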
""" # Constants defining column indices for the input data PROCESSOR_TIME_PERC = 0 MEM_TOTAL_BYTES = 1 MEM_AVAIL_BYTES = 2 MEM_PAGE_READS_PER_SEC = 3 MEM_PAGE_WRITES_PER_SEC = 4 DISK_READ_BYTES_PER_SEC = 5 DISK_WRITE_BYTES_PER_SEC = 6 NET_BYTES_RECEIVED_PER_SEC = 7 NET_BYTES_SENT_PER_SEC = 8 # Types of workload WORKLOAD_IDLE = 0 WORKLOAD_CPU = 1 WORKLOAD_MEM = 2 WORKLOAD_DISK = 3 def get_name(self): return 'dummy_scorer' def get_description(self): return 'Dummy workload classifier' def get_metainfo(self): """Metadata about input/output format of this scoring engine. This information is used in strategy using this scoring engine to prepare the input information and to understand the results. """ return """{ "feature_columns": [ "proc-processor-time-%", "mem-total-bytes", "mem-avail-bytes", "mem-page-reads/sec", "mem-page-writes/sec", "disk-read-bytes/sec", "disk-write-bytes/sec", "net-bytes-received/sec", "net-bytes-sent/sec"], "result_columns": [ "workload", "idle-probability", "cpu-probability", "memory-probability", "disk-probability"], "workloads": [ "idle", "cpu-intensive", "memory-intensive", "disk-intensive"] }""" def calculate_score(self, features): """Arbitrary algorithm calculating the score. It demonstrates how to parse the input data (features) and serialize the results. It detects the workload type based on the metrics and also returns the probabilities of each workload detection (again, the arbitrary values are returned, just for demonstration how the "real" machine learning algorithm could work. For example, the Gradient Boosting Machine from H2O framework is using exactly the same format: http://www.h2o.ai/verticals/algos/gbm/ """ LOG.debug('Calculating score, features: %s', features) # By default IDLE workload will be returned workload = self.WORKLOAD_IDLE idle_prob = 0.0 cpu_prob = 0.0 mem_prob = 0.0 disk_prob = 0.0 # Basic input validation try: flist = jsonutils.loads(features) except Exception as e: raise ValueError(_('Unable to parse features: ') % e) if type(flist) is not list: raise ValueError(_('JSON list expected in feature argument')) if len(flist) != 9: raise ValueError(_('Invalid number of features, expected 9')) # Simple logic for workload classification if flist[self.PROCESSOR_TIME_PERC] >= 80: workload = self.WORKLOAD_CPU cpu_prob = 100.0 elif flist[self.MEM_PAGE_READS_PER_SEC] >= 1000 \ and flist[self.MEM_PAGE_WRITES_PER_SEC] >= 1000: workload = self.WORKLOAD_MEM mem_prob = 100.0 elif flist[self.DISK_READ_BYTES_PER_SEC] >= 50*units.Mi \ and flist[self.DISK_WRITE_BYTES_PER_SEC] >= 50*units.Mi: workload = self.WORKLOAD_DISK disk_prob = 100.0 else: idle_prob = 100.0 if flist[self.PROCESSOR_TIME_PERC] >= 40: cpu_prob = 50.0 if flist[self.MEM_PAGE_READS_PER_SEC] >= 500 \ or flist[self.MEM_PAGE_WRITES_PER_SEC] >= 500: mem_prob = 50.0 return jsonutils.dumps( [workload, idle_prob, cpu_prob, mem_prob, disk_prob]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scoring/dummy_scoring_container.py0000664000175000017500000000651000000000000030375 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from oslo_serialization import jsonutils from watcher._i18n import _ from watcher.decision_engine.scoring import base LOG = log.getLogger(__name__) class DummyScoringContainer(base.ScoringEngineContainer): """Sample Scoring Engine container returning a list of scoring engines. Please note that it can be used in dynamic scenarios and the returned list might return instances based on some external configuration (e.g. in database). In order for these scoring engines to become discoverable in Watcher API and Watcher CLI, a database re-sync is required. It can be executed using watcher-sync tool for example. """ @classmethod def get_scoring_engine_list(cls): return [ SimpleFunctionScorer( 'dummy_min_scorer', 'Dummy Scorer calculating the minimum value', min), SimpleFunctionScorer( 'dummy_max_scorer', 'Dummy Scorer calculating the maximum value', max), SimpleFunctionScorer( 'dummy_avg_scorer', 'Dummy Scorer calculating the average value', lambda x: float(sum(x)) / len(x)), ] class SimpleFunctionScorer(base.ScoringEngine): """A simple generic scoring engine for demonstration purposes only. A generic scoring engine implementation, which is expecting a JSON formatted array of numbers to be passed as an input for score calculation. It then executes the aggregate function on this array and returns an array with a single aggregated number (also JSON formatted). """ def __init__(self, name, description, aggregate_function): super(SimpleFunctionScorer, self).__init__(config=None) self._name = name self._description = description self._aggregate_function = aggregate_function def get_name(self): return self._name def get_description(self): return self._description def get_metainfo(self): return '' def calculate_score(self, features): LOG.debug('Calculating score, features: %s', features) # Basic input validation try: flist = jsonutils.loads(features) except Exception as e: raise ValueError(_('Unable to parse features: %s') % e) if type(flist) is not list: raise ValueError(_('JSON list expected in feature argument')) if len(flist) < 1: raise ValueError(_('At least one feature is required')) # Calculate the result result = self._aggregate_function(flist) # Return the aggregated result return jsonutils.dumps([result]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/scoring/scoring_factory.py0000664000175000017500000000711700000000000026653 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ A module providing helper methods to work with Scoring Engines. """ from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.loading import default LOG = log.getLogger(__name__) _scoring_engine_map = None def get_scoring_engine(scoring_engine_name): """Returns a Scoring Engine by its name. Method retrieves a Scoring Engine instance by its name. Scoring Engine instances are being cached in memory to avoid enumerating the Stevedore plugins on each call. When called for the first time, it reloads the cache. :return: A Scoring Engine instance with a given name :rtype: :class: `watcher.decision_engine.scoring.scoring_engine.ScoringEngine` """ global _scoring_engine_map _reload_scoring_engines() scoring_engine = _scoring_engine_map.get(scoring_engine_name) if scoring_engine is None: raise KeyError(_('Scoring Engine with name=%s not found') % scoring_engine_name) return scoring_engine def get_scoring_engine_list(): """Returns a list of Scoring Engine instances. The main use case for this method is discoverability, so the Scoring Engine list is always reloaded before returning any results. Frequent calling of this method might have a negative performance impact. :return: A list of all available Scoring Engine instances :rtype: List of :class: `watcher.decision_engine.scoring.scoring_engine.ScoringEngine` """ global _scoring_engine_map _reload_scoring_engines(True) return _scoring_engine_map.values() def _reload_scoring_engines(refresh=False): """Reloads Scoring Engines from Stevedore plugins to memory. Please note that two Stevedore entry points are used: - watcher_scoring_engines: for simple plugin implementations - watcher_scoring_engine_containers: for container plugins, which enable the dynamic scenarios (its get_scoring_engine_list method might return different values on each call) """ global _scoring_engine_map if _scoring_engine_map is None or refresh: LOG.debug("Reloading Scoring Engine plugins") engines = default.DefaultScoringLoader().list_available() _scoring_engine_map = dict() for name in engines.keys(): se_impl = default.DefaultScoringLoader().load(name) LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name()) _scoring_engine_map[se_impl.get_name()] = se_impl engine_containers = \ default.DefaultScoringContainerLoader().list_available() for container_id, container_cls in engine_containers.items(): LOG.debug("Found Scoring Engine container plugin: %s", container_id) for se in container_cls.get_scoring_engine_list(): LOG.debug("Found Scoring Engine plugin: %s", se.get_name()) _scoring_engine_map[se.get_name()] = se ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/solution/0000775000175000017500000000000000000000000023310 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/solution/__init__.py0000664000175000017500000000000000000000000025407 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/solution/base.py0000664000175000017500000001115500000000000024577 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # 
Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A :ref:`Solution ` is the result of execution of a :ref:`strategy ` (i.e., an algorithm). Each solution is composed of many pieces of information: - A set of :ref:`actions ` generated by the strategy in order to achieve the :ref:`goal ` of an associated :ref:`audit `. - A set of :ref:`efficacy indicators ` as defined by the associated goal - A :ref:`global efficacy ` which is computed by the associated goal using the aforementioned efficacy indicators. A :ref:`Solution ` is different from an :ref:`Action Plan ` because it contains the non-scheduled list of :ref:`Actions ` which is produced by a :ref:`Strategy `. In other words, the list of Actions in a :ref:`Solution ` has not yet been re-ordered by the :ref:`Watcher Planner `. Note that some algorithms (i.e. :ref:`Strategies `) may generate several :ref:`Solutions `. This gives rise to the problem of determining which :ref:`Solution ` should be applied. Two approaches to dealing with this can be envisaged: - **fully automated mode**: only the :ref:`Solution ` with the highest ranking (i.e., the highest :ref:`Optimization Efficacy `) will be sent to the :ref:`Watcher Planner ` and translated into concrete :ref:`Actions `. - **manual mode**: several :ref:`Solutions ` are proposed to the :ref:`Administrator ` with a detailed measurement of the estimated :ref:`Optimization Efficacy ` and he/she decides which one will be launched. """ import abc from watcher.decision_engine.solution import efficacy class BaseSolution(object, metaclass=abc.ABCMeta): def __init__(self, goal, strategy): """Base Solution constructor :param goal: Goal associated to this solution :type goal: :py:class:`~.base.Goal` instance :param strategy: Strategy associated to this solution :type strategy: :py:class:`~.BaseStrategy` instance """ self.goal = goal self._strategy = strategy self.origin = None self.model = None self.efficacy = efficacy.Efficacy(self.goal, self.strategy) @property def global_efficacy(self): return self.efficacy.global_efficacy @property def efficacy_indicators(self): return self.efficacy.indicators @property def strategy(self): return self._strategy def compute_global_efficacy(self): """Compute the global efficacy given a map of efficacy indicators""" self.efficacy.compute_global_efficacy() def set_efficacy_indicators(self, **indicators_map): """Set the efficacy indicators mapping (no validation) :param indicators_map: mapping between the indicator name and its value :type indicators_map: dict {`str`: `object`} """ self.efficacy.set_efficacy_indicators(**indicators_map) @abc.abstractmethod def add_action(self, action_type, resource_id, input_parameters=None): """Add a new Action in the Solution :param action_type: the unique id of an action type defined in entry point 'watcher_actions' :param resource_id: the unique id of the resource to which the `Action` applies. :param input_parameters: An array of input parameters provided as key-value pairs of strings. 
Each key-pair contains names and values that match what was previously defined in the `Action` type schema. """ raise NotImplementedError() @property @abc.abstractmethod def actions(self): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/solution/default.py0000664000175000017500000000466000000000000025314 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.applier.actions import base as baction from watcher.common import exception from watcher.decision_engine.solution import base LOG = log.getLogger(__name__) class DefaultSolution(base.BaseSolution): def __init__(self, goal, strategy): """Stores a set of actions generated by a strategy The DefaultSolution class store a set of actions generated by a strategy in order to achieve the goal. :param goal: Goal associated to this solution :type goal: :py:class:`~.base.Goal` instance :param strategy: Strategy associated to this solution :type strategy: :py:class:`~.BaseStrategy` instance """ super(DefaultSolution, self).__init__(goal, strategy) self._actions = [] def add_action(self, action_type, input_parameters=None, resource_id=None): if input_parameters is not None: if baction.BaseAction.RESOURCE_ID in input_parameters.keys(): raise exception.ReservedWord(name=baction.BaseAction. RESOURCE_ID) else: input_parameters = {} if resource_id is not None: input_parameters[baction.BaseAction.RESOURCE_ID] = resource_id action = { 'action_type': action_type, 'input_parameters': input_parameters } if action not in self._actions: self._actions.append(action) else: LOG.warning('Action %s has been added into the solution, ' 'duplicate action will be dropped.', str(action)) def __str__(self): return "\n".join(self._actions) @property def actions(self): """Get the current actions of the solution""" return self._actions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/solution/efficacy.py0000664000175000017500000000706400000000000025442 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
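
# For illustration only (the goal/strategy objects and the indicator name
# are hypothetical placeholders), the Efficacy class defined below is
# typically driven like this from a solution:
#
#     eff = Efficacy(goal, strategy)
#     eff.set_efficacy_indicators(instance_migrations_count=2)
#     eff.compute_global_efficacy()
#     # eff.indicators and eff.global_efficacy are then read back by the
#     # planner when persisting the action plan.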
import numbers from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.common import utils LOG = log.getLogger(__name__) class IndicatorsMap(utils.Struct): pass class Indicator(utils.Struct): def __init__(self, name, description, unit, value): super(Indicator, self).__init__() self.name = name self.description = description self.unit = unit if not isinstance(value, numbers.Number): raise exception.InvalidIndicatorValue( _("An indicator value should be a number")) self.value = value class Efficacy(object): """Solution efficacy""" def __init__(self, goal, strategy): """Solution efficacy :param goal: Goal associated to this solution :type goal: :py:class:`~.base.Goal` instance :param strategy: Strategy associated to this solution :type strategy: :py:class:`~.BaseStrategy` instance """ self.goal = goal self.strategy = strategy self._efficacy_spec = self.goal.efficacy_specification # Used to store in DB the info related to the efficacy indicators self.indicators = [] # Used to compute the global efficacy self._indicators_mapping = IndicatorsMap() self.global_efficacy = [] def set_efficacy_indicators(self, **indicators_map): """Set the efficacy indicators :param indicators_map: kwargs where the key is the name of the efficacy indicator as defined in the associated :py:class:`~.IndicatorSpecification` and the value is a number. :type indicators_map: dict {str: numerical value} """ self._indicators_mapping.update(indicators_map) def compute_global_efficacy(self): self._efficacy_spec.validate_efficacy_indicators( self._indicators_mapping) try: self.global_efficacy = ( self._efficacy_spec.get_global_efficacy_indicator( self._indicators_mapping)) indicators_specs_map = { indicator_spec.name: indicator_spec for indicator_spec in self._efficacy_spec.indicators_specs} indicators = [] for indicator_name, value in self._indicators_mapping.items(): related_indicator_spec = indicators_specs_map[indicator_name] indicators.append( Indicator( name=related_indicator_spec.name, description=related_indicator_spec.description, unit=related_indicator_spec.unit, value=value)) self.indicators = indicators except Exception as exc: LOG.exception(exc) raise exception.GlobalEfficacyComputationError( goal=self.goal.name, strategy=self.strategy.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/solution/solution_comparator.py0000664000175000017500000000150000000000000027761 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc class BaseSolutionComparator(object, metaclass=abc.ABCMeta): @abc.abstractmethod def compare(self, sol1, sol2): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/solution/solution_evaluator.py0000664000175000017500000000147600000000000027630 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc class BaseSolutionEvaluator(object, metaclass=abc.ABCMeta): @abc.abstractmethod def evaluate(self, solution): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/strategy/0000775000175000017500000000000000000000000023276 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/__init__.py0000664000175000017500000000000000000000000025375 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/strategy/common/0000775000175000017500000000000000000000000024566 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/common/__init__.py0000664000175000017500000000000000000000000026665 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/common/level.py0000664000175000017500000000146600000000000026256 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
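# NOTE(editor): illustrative sketch, not part of the original module. The
# StrategyLevel enum below is consumed through BaseStrategy, which
# initializes its ``strategy_level`` attribute to the conservative member
# and exposes a setter, so a caller could switch a loaded strategy to a
# more aggressive level (strategy object assumed):
#
#   strategy.strategy_level = StrategyLevel.aggressive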
# import enum class StrategyLevel(enum.Enum): conservative = "conservative" balanced = "balanced" growth = "growth" aggressive = "aggressive" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/strategy/context/0000775000175000017500000000000000000000000024762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/context/__init__.py0000664000175000017500000000000000000000000027061 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/context/base.py0000664000175000017500000000513600000000000026253 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from watcher import notifications from watcher.objects import fields class StrategyContext(object, metaclass=abc.ABCMeta): def execute_strategy(self, audit, request_context): """Execute the strategy for the given audit :param audit: Audit object :type audit: :py:class:`~.objects.audit.Audit` instance :param request_context: Current request context :type request_context: :py:class:`~.RequestContext` instance :returns: The computed solution :rtype: :py:class:`~.BaseSolution` instance """ try: notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.STRATEGY, phase=fields.NotificationPhase.START) solution = self.do_execute_strategy(audit, request_context) notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.STRATEGY, phase=fields.NotificationPhase.END) return solution except Exception: notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.STRATEGY, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) raise @abc.abstractmethod def do_execute_strategy(self, audit, request_context): """Execute the strategy for the given audit :param audit: Audit object :type audit: :py:class:`~.objects.audit.Audit` instance :param request_context: Current request context :type request_context: :py:class:`~.RequestContext` instance :returns: The computed solution :rtype: :py:class:`~.BaseSolution` instance """ raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/context/default.py0000664000175000017500000000503700000000000026765 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy.context import base from watcher.decision_engine.strategy.selection import default from watcher import objects LOG = log.getLogger(__name__) class DefaultStrategyContext(base.StrategyContext): def __init__(self): super(DefaultStrategyContext, self).__init__() LOG.debug("Initializing Strategy Context") @staticmethod def select_strategy(audit, request_context): osc = clients.OpenStackClients() # TODO(jed): retrieve (threshold, ...) from the audit parameters # TODO(jed): create ActionPlan goal = objects.Goal.get_by_id(request_context, audit.goal_id) # NOTE(jed56): In the audit object, the 'strategy_id' attribute # is optional. If the admin wants to force the trigger of a Strategy, # they can specify the Strategy UUID in the Audit. strategy_name = None if audit.strategy_id: strategy = objects.Strategy.get_by_id( request_context, audit.strategy_id) strategy_name = strategy.name strategy_selector = default.DefaultStrategySelector( goal_name=goal.name, strategy_name=strategy_name, osc=osc) return strategy_selector.select() def do_execute_strategy(self, audit, request_context): selected_strategy = self.select_strategy(audit, request_context) selected_strategy.audit_scope = audit.scope schema = selected_strategy.get_schema() if not audit.parameters and schema: # No parameters were provided: let the validator inject the # default values defined in the strategy's schema utils.StrictDefaultValidatingDraft4Validator(schema).validate( audit.parameters) selected_strategy.input_parameters.update({ name: value for name, value in audit.parameters.items() }) return selected_strategy.execute(audit=audit) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6351352 python_watcher-14.0.0/watcher/decision_engine/strategy/selection/0000775000175000017500000000000000000000000025263 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/selection/__init__.py0000664000175000017500000000000000000000000027362 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/selection/base.py0000664000175000017500000000145200000000000026551 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
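# NOTE(editor): illustrative sketch, not part of the original tree. The
# BaseSelector contract below is deliberately tiny -- select() just has to
# return a loaded strategy -- so a trivial selector pinning one strategy
# could look like this (hypothetical class):
#
#   class FixedStrategySelector(BaseSelector):
#       def __init__(self, strategy):
#           self._strategy = strategy
#
#       def select(self):
#           return self._strategy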
# import abc class BaseSelector(object, metaclass=abc.ABCMeta): @abc.abstractmethod def select(self): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/selection/default.py0000664000175000017500000000531200000000000027262 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.loading import default from watcher.decision_engine.strategy.selection import base LOG = log.getLogger(__name__) class DefaultStrategySelector(base.BaseSelector): def __init__(self, goal_name, strategy_name=None, osc=None): """Default strategy selector :param goal_name: Name of the goal :param strategy_name: Name of the strategy :param osc: an OpenStackClients instance """ super(DefaultStrategySelector, self).__init__() self.goal_name = goal_name self.strategy_name = strategy_name self.osc = osc self.strategy_loader = default.DefaultStrategyLoader() def select(self): """Selects a strategy :raises: :py:class:`~.LoadingError` if it failed to load a strategy :returns: A :py:class:`~.BaseStrategy` instance """ strategy_to_load = None try: if self.strategy_name: strategy_to_load = self.strategy_name else: available_strategies = self.strategy_loader.list_available() available_strategies_for_goal = list( key for key, strategy in available_strategies.items() if strategy.get_goal_name() == self.goal_name) if not available_strategies_for_goal: raise exception.NoAvailableStrategyForGoal( goal=self.goal_name) # TODO(v-francoise): We should do some more work here to select # a strategy out of a given goal instead of just choosing the # 1st one strategy_to_load = available_strategies_for_goal[0] return self.strategy_loader.load(strategy_to_load, osc=self.osc) except exception.NoAvailableStrategyForGoal: raise except Exception as exc: LOG.exception(exc) raise exception.LoadingError( _("Could not load any strategy for goal %(goal)s"), goal=self.goal_name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/0000775000175000017500000000000000000000000025450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/__init__.py0000664000175000017500000000575500000000000027575 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.strategy.strategies import actuation from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import basic_consolidation from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher.decision_engine.strategy.strategies import dummy_with_scorer from watcher.decision_engine.strategy.strategies import host_maintenance from watcher.decision_engine.strategy.strategies import \ node_resource_consolidation from watcher.decision_engine.strategy.strategies import noisy_neighbor from watcher.decision_engine.strategy.strategies import outlet_temp_control from watcher.decision_engine.strategy.strategies import saving_energy from watcher.decision_engine.strategy.strategies import \ storage_capacity_balance from watcher.decision_engine.strategy.strategies import uniform_airflow from watcher.decision_engine.strategy.strategies import \ vm_workload_consolidation from watcher.decision_engine.strategy.strategies import workload_balance from watcher.decision_engine.strategy.strategies import workload_stabilization from watcher.decision_engine.strategy.strategies import zone_migration Actuator = actuation.Actuator BaseStrategy = base.BaseStrategy BasicConsolidation = basic_consolidation.BasicConsolidation OutletTempControl = outlet_temp_control.OutletTempControl DummyStrategy = dummy_strategy.DummyStrategy DummyWithScorer = dummy_with_scorer.DummyWithScorer SavingEnergy = saving_energy.SavingEnergy StorageCapacityBalance = storage_capacity_balance.StorageCapacityBalance VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation WorkloadBalance = workload_balance.WorkloadBalance WorkloadStabilization = workload_stabilization.WorkloadStabilization UniformAirflow = uniform_airflow.UniformAirflow NodeResourceConsolidation = ( node_resource_consolidation.NodeResourceConsolidation) NoisyNeighbor = noisy_neighbor.NoisyNeighbor ZoneMigration = zone_migration.ZoneMigration HostMaintenance = host_maintenance.HostMaintenance __all__ = ("Actuator", "BaseStrategy", "BasicConsolidation", "OutletTempControl", "DummyStrategy", "DummyWithScorer", "VMWorkloadConsolidation", "WorkloadBalance", "WorkloadStabilization", "UniformAirflow", "NoisyNeighbor", "SavingEnergy", "StorageCapacityBalance", "ZoneMigration", "HostMaintenance", "NodeResourceConsolidation") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/actuation.py0000664000175000017500000000570100000000000030014 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base class Actuator(base.UnclassifiedStrategy): """Actuator Actuator that simply executes the actions given as parameter This strategy allows anyone to create an action plan with a predefined set of actions. This strategy can be used for 2 different purposes: - Test actions - Use this strategy based on an event trigger to perform some explicit task """ @classmethod def get_name(cls): return "actuator" @classmethod def get_display_name(cls): return _("Actuator") @classmethod def get_translatable_display_name(cls): return "Actuator" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "$schema": "http://json-schema.org/draft-04/schema#", "type": "object", "properties": { "actions": { "type": "array", "items": { "type": "object", "properties": { "action_type": { "type": "string" }, "resource_id": { "type": "string" }, "input_parameters": { "type": "object", "properties": {}, "additionalProperties": True } }, "required": [ "action_type", "input_parameters" ], "additionalProperties": True, } } }, "required": [ "actions" ] } @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] @property def actions(self): return self.input_parameters.get('actions', []) def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): for action in self.actions: self.solution.add_action(**action) def post_execute(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/base.py0000664000175000017500000004344100000000000026742 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Strategy ` is an algorithm implementation which is able to find a :ref:`Solution ` for a given :ref:`Goal `. There may be several potential strategies which are able to achieve the same :ref:`Goal `. This is why it is possible to configure which specific :ref:`Strategy ` should be used for each :ref:`Goal `. Some strategies may provide better optimization results but may take more time to find an optimal :ref:`Solution `. When a new :ref:`Goal ` is added to the Watcher configuration, at least one default associated :ref:`Strategy ` should be provided as well. :ref:`Some default implementations are provided `, but it is possible to :ref:`develop new implementations ` which are dynamically loaded by Watcher at launch time. 
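Whatever the implementation, every strategy is driven through the same
three-phase pipeline implemented by ``BaseStrategy.execute()`` below
(editor's illustration of that flow)::

    solution = strategy.execute(audit=audit)
    # 1. pre_execute()   -- sanity checks, e.g. the cluster model exists
    # 2. do_execute()    -- the actual optimization algorithm
    # 3. post_execute()  -- efficacy indicators are set
    # then solution.compute_global_efficacy() is called before returning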
""" import abc from oslo_config import cfg from oslo_log import log from oslo_utils import strutils from watcher.common import clients from watcher.common import context from watcher.common import exception from watcher.common.loader import loadable from watcher.common import utils from watcher.decision_engine.datasources import manager as ds_manager from watcher.decision_engine.loading import default as loading from watcher.decision_engine.model.collector import manager from watcher.decision_engine.solution import default from watcher.decision_engine.strategy.common import level LOG = log.getLogger(__name__) CONF = cfg.CONF class StrategyEndpoint(object): def __init__(self, messaging): self._messaging = messaging def _collect_metrics(self, strategy, datasource): metrics = [] if not datasource: return {'type': 'Metrics', 'state': metrics, 'mandatory': False, 'comment': ''} else: ds_metrics = datasource.list_metrics() if ds_metrics is None: raise exception.DataSourceNotAvailable( datasource=datasource.NAME) else: for metric in strategy.DATASOURCE_METRICS: original_metric_name = datasource.METRIC_MAP.get(metric) if original_metric_name in ds_metrics: metrics.append({original_metric_name: 'available'}) else: metrics.append({original_metric_name: 'not available'}) return {'type': 'Metrics', 'state': metrics, 'mandatory': False, 'comment': ''} def _get_datasource_status(self, strategy, datasource): if not datasource: state = "Datasource is not presented for this strategy" else: state = "%s: %s" % (datasource.NAME, datasource.check_availability()) return {'type': 'Datasource', 'state': state, 'mandatory': True, 'comment': ''} def _get_cdm(self, strategy): models = [] for model in ['compute_model', 'storage_model', 'baremetal_model']: try: getattr(strategy, model) except Exception: models.append({model: 'not available'}) else: models.append({model: 'available'}) return {'type': 'CDM', 'state': models, 'mandatory': True, 'comment': ''} def get_strategy_info(self, context, strategy_name): strategy = loading.DefaultStrategyLoader().load(strategy_name) try: is_datasources = getattr(strategy.config, 'datasources', None) if is_datasources: datasource = getattr(strategy, 'datasource_backend') else: datasource = getattr(strategy, strategy.config.datasource) except (AttributeError, IndexError): datasource = [] available_datasource = self._get_datasource_status(strategy, datasource) available_metrics = self._collect_metrics(strategy, datasource) available_cdm = self._get_cdm(strategy) return [available_datasource, available_metrics, available_cdm] class BaseStrategy(loadable.Loadable, metaclass=abc.ABCMeta): """A base class for all the strategies A Strategy is an algorithm implementation which is able to find a Solution for a given Goal. 
""" DATASOURCE_METRICS = [] """Contains all metrics the strategy requires from a datasource to properly execute""" MIGRATION = "migrate" def __init__(self, config, osc=None): """Constructor: the signature should be identical within the subclasses :param config: Configuration related to this plugin :type config: :py:class:`~.Struct` :param osc: An OpenStackClients instance :type osc: :py:class:`~.OpenStackClients` instance """ super(BaseStrategy, self).__init__(config) self.ctx = context.make_context() self._name = self.get_name() self._display_name = self.get_display_name() self._goal = self.get_goal() # default strategy level self._strategy_level = level.StrategyLevel.conservative self._cluster_state_collector = None # the solution given by the strategy self._solution = default.DefaultSolution(goal=self.goal, strategy=self) self._osc = osc self._collector_manager = None self._compute_model = None self._storage_model = None self._baremetal_model = None self._input_parameters = utils.Struct() self._audit_scope = None self._datasource_backend = None self._planner = 'weight' @classmethod @abc.abstractmethod def get_name(cls): """The name of the strategy""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_display_name(cls): """The goal display name for the strategy""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_translatable_display_name(cls): """The translatable msgid of the strategy""" # Note(v-francoise): Defined here to be used as the translation key for # other services raise NotImplementedError() @classmethod @abc.abstractmethod def get_goal_name(cls): """The goal name the strategy achieves""" raise NotImplementedError() @classmethod def get_goal(cls): """The goal the strategy achieves""" goal_loader = loading.DefaultGoalLoader() return goal_loader.load(cls.get_goal_name()) @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ datasources_ops = list(ds_manager.DataSourceManager.metric_map.keys()) return [ cfg.ListOpt( "datasources", help="Datasources to use in order to query the needed metrics." " This option overrides the global preference." " options: {0}".format(datasources_ops), item_type=cfg.types.String(choices=datasources_ops), default=None) ] @abc.abstractmethod def pre_execute(self): """Pre-execution phase This can be used to fetch some pre-requisites or data. """ raise NotImplementedError() @abc.abstractmethod def do_execute(self, audit=None): """Strategy execution phase :param audit: An Audit instance :type audit: :py:class:`~.Audit` instance This phase is where you should put the main logic of your strategy. """ raise NotImplementedError() @abc.abstractmethod def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ raise NotImplementedError() def _pre_execute(self): """Base Pre-execution phase This will perform basic pre execution operations most strategies should perform. 
""" LOG.info("Initializing " + self.get_display_name()) if not self.compute_model: raise exception.ClusterStateNotDefined() LOG.debug(self.compute_model.to_string()) def execute(self, audit=None): """Execute a strategy :param audit: An Audit instance :type audit: :py:class:`~.Audit` instance :return: A computed solution (via a placement algorithm) :rtype: :py:class:`~.BaseSolution` instance """ self.pre_execute() self.do_execute(audit=audit) self.post_execute() self.solution.compute_global_efficacy() return self.solution @property def collector_manager(self): if self._collector_manager is None: self._collector_manager = manager.CollectorManager() return self._collector_manager @property def compute_model(self): """Cluster data model :returns: Cluster data model the strategy is executed on :rtype model: :py:class:`~.ModelRoot` instance """ if self._compute_model is None: collector = self.collector_manager.get_cluster_model_collector( 'compute', osc=self.osc) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=self.audit_scope) self._compute_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not self._compute_model: raise exception.ClusterStateNotDefined() if self._compute_model.stale: raise exception.ClusterStateStale() return self._compute_model @property def storage_model(self): """Cluster data model :returns: Cluster data model the strategy is executed on :rtype model: :py:class:`~.ModelRoot` instance """ if self._storage_model is None: collector = self.collector_manager.get_cluster_model_collector( 'storage', osc=self.osc) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=self.audit_scope) self._storage_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not self._storage_model: raise exception.ClusterStateNotDefined() if self._storage_model.stale: raise exception.ClusterStateStale() return self._storage_model @property def baremetal_model(self): """Cluster data model :returns: Cluster data model the strategy is executed on :rtype model: :py:class:`~.ModelRoot` instance """ if self._baremetal_model is None: collector = self.collector_manager.get_cluster_model_collector( 'baremetal', osc=self.osc) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=self.audit_scope) self._baremetal_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not self._baremetal_model: raise exception.ClusterStateNotDefined() if self._baremetal_model.stale: raise exception.ClusterStateStale() return self._baremetal_model @classmethod def get_schema(cls): """Defines a Schema that the input parameters shall comply to :return: A jsonschema format (mandatory default setting) :rtype: dict """ return {} @property def datasource_backend(self): if not self._datasource_backend: # Load the global preferred datasources order but override it # if the strategy has a specific datasources config datasources = CONF.watcher_datasources if self.config.datasources: datasources = self.config self._datasource_backend = ds_manager.DataSourceManager( config=datasources, osc=self.osc ).get_backend(self.DATASOURCE_METRICS) return self._datasource_backend @property def input_parameters(self): return self._input_parameters @input_parameters.setter def input_parameters(self, p): self._input_parameters = p @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @property def solution(self): return self._solution @solution.setter 
def solution(self, s): self._solution = s @property def audit_scope(self): return self._audit_scope @audit_scope.setter def audit_scope(self, s): self._audit_scope = s @property def name(self): return self._name @property def display_name(self): return self._display_name @property def goal(self): return self._goal @property def strategy_level(self): return self._strategy_level @strategy_level.setter def strategy_level(self, s): self._strategy_level = s @property def state_collector(self): return self._cluster_state_collector @state_collector.setter def state_collector(self, s): self._cluster_state_collector = s @property def planner(self): return self._planner @planner.setter def planner(self, s): self._planner = s def filter_instances_by_audit_tag(self, instances): if not self.config.check_optimize_metadata: return instances instances_to_migrate = [] for instance in instances: optimize = True if instance.metadata: try: optimize = strutils.bool_from_string( instance.metadata.get('optimize')) except ValueError: optimize = False if optimize: instances_to_migrate.append(instance) return instances_to_migrate def add_action_migrate(self, instance, migration_type, source_node, destination_node): parameters = {'migration_type': migration_type, 'source_node': source_node.hostname, 'destination_node': destination_node.hostname, 'resource_name': instance.name} self.solution.add_action(action_type=self.MIGRATION, resource_id=instance.uuid, input_parameters=parameters) class DummyBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): @classmethod def get_goal_name(cls): return "dummy" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] class UnclassifiedStrategy(BaseStrategy, metaclass=abc.ABCMeta): """This base class is used to ease the development of new strategies The goal defined within this strategy can be used to simplify the documentation explaining how to implement a new strategy plugin by omitting the need for the strategy developer to define a goal straight away. 
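    The ``Actuator`` strategy shipped in this package (see actuation.py
    above) is a concrete example of such a strategy::

        class Actuator(base.UnclassifiedStrategy):
            ...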
""" @classmethod def get_goal_name(cls): return "unclassified" class ServerConsolidationBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): REASON_FOR_DISABLE = 'watcher_disabled' @classmethod def get_goal_name(cls): return "server_consolidation" class ThermalOptimizationBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): @classmethod def get_goal_name(cls): return "thermal_optimization" class WorkloadStabilizationBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): def __init__(self, *args, **kwargs): super(WorkloadStabilizationBaseStrategy, self ).__init__(*args, **kwargs) self._planner = 'workload_stabilization' @classmethod def get_goal_name(cls): return "workload_balancing" class NoisyNeighborBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): @classmethod def get_goal_name(cls): return "noisy_neighbor" class SavingEnergyBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): @classmethod def get_goal_name(cls): return "saving_energy" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] class ZoneMigrationBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): @classmethod def get_goal_name(cls): return "hardware_maintenance" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] class HostMaintenanceBaseStrategy(BaseStrategy, metaclass=abc.ABCMeta): REASON_FOR_MAINTAINING = 'watcher_maintaining' @classmethod def get_goal_name(cls): return "cluster_maintaining" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/basic_consolidation.py0000664000175000017500000004401700000000000032036 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class BasicConsolidation(base.ServerConsolidationBaseStrategy): """Good server consolidation strategy Basic offline consolidation using live migration Consolidation of VMs is essential to achieve energy optimization in cloud environments such as OpenStack. As VMs are spinned up and/or moved over time, it becomes necessary to migrate VMs among servers to lower the costs. However, migration of VMs introduces runtime overheads and consumes extra energy, thus a good server consolidation strategy should carefully plan for migration in order to both minimize energy consumption and comply to the various SLAs. This algorithm not only minimizes the overall number of used servers, but also minimizes the number of migrations. It has been developed only for tests. 
You must have at least 2 physical compute nodes to run it; a two-node DevStack setup is enough. It assumes that live migration is possible on your OpenStack cluster. """ DATASOURCE_METRICS = ['host_cpu_usage', 'instance_cpu_usage'] CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" def __init__(self, config, osc=None): """Basic offline Consolidation using live migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(BasicConsolidation, self).__init__(config, osc) # set default value for the number of enabled compute nodes self.number_of_enabled_nodes = 0 # set default value for the number of released nodes self.number_of_released_nodes = 0 # set default value for the number of migrations self.number_of_migrations = 0 # set default value for the efficacy self.efficacy = 100 # TODO(jed): improve threshold overbooking? self.threshold_mem = 1 self.threshold_disk = 1 self.threshold_cores = 1 @classmethod def get_name(cls): return "basic" @property def migration_attempts(self): return self.input_parameters.get('migration_attempts', 0) @property def period(self): return self.input_parameters.get('period', 7200) @property def granularity(self): return self.input_parameters.get('granularity', 300) @property def aggregation_method(self): return self.input_parameters.get( 'aggregation_method', { "instance": 'mean', "compute_node": 'mean', "node": '' } ) @classmethod def get_display_name(cls): return _("Basic offline consolidation") @classmethod def get_translatable_display_name(cls): return "Basic offline consolidation" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "migration_attempts": { "description": "Maximum number of combinations to be " "tried by the strategy while searching " "for potential candidates. To remove the " "limit, set it to 0 (the default)", "type": "number", "default": 0 }, "period": { "description": "The time interval in seconds for " "getting statistic aggregation", "type": "number", "default": 7200 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, "aggregation_method": { "description": "Function used to aggregate multiple " "measures into an aggregate.
For example, " "the min aggregation method will aggregate " "the values of different measures to the " "minimum value of all the measures in the " "time range.", "type": "object", "properties": { "instance": { "type": "string", "default": 'mean' }, "compute_node": { "type": "string", "default": 'mean' }, "node": { "type": "string", # node is deprecated "default": '' }, }, "default": { "instance": 'mean', "compute_node": 'mean', # node is deprecated "node": '', } }, }, } @classmethod def get_config_opts(cls): return super(BasicConsolidation, cls).get_config_opts() + [ cfg.BoolOpt( 'check_optimize_metadata', help='Check optimize metadata field in instance before' ' migration', default=False), ] def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def check_migration(self, source_node, destination_node, instance_to_migrate): """Check if the migration is possible :param source_node: the current node of the virtual machine :param destination_node: the destination of the virtual machine :param instance_to_migrate: the instance / virtual machine :return: True if there is enough place otherwise false """ if source_node == destination_node: return False LOG.debug('Migrate instance %s from %s to %s', instance_to_migrate, source_node, destination_node) used_resources = self.compute_model.get_node_used_resources( destination_node) # capacity requested by the compute node total_cores = used_resources['vcpu'] + instance_to_migrate.vcpus total_disk = used_resources['disk'] + instance_to_migrate.disk total_mem = used_resources['memory'] + instance_to_migrate.memory return self.check_threshold(destination_node, total_cores, total_disk, total_mem) def check_threshold(self, destination_node, total_cores, total_disk, total_mem): """Check threshold Check the threshold value defined by the ratio of aggregated CPU capacity of VMs on one node to CPU capacity of this node must not exceed the threshold value. 
:param destination_node: the destination of the virtual machine :param total_cores: total cores of the virtual machine :param total_disk: total disk size used by the virtual machine :param total_mem: total memory used by the virtual machine :return: True if the threshold is not exceeded """ cpu_capacity = destination_node.vcpu_capacity disk_capacity = destination_node.disk_gb_capacity memory_capacity = destination_node.memory_mb_capacity return (cpu_capacity >= total_cores * self.threshold_cores and disk_capacity >= total_disk * self.threshold_disk and memory_capacity >= total_mem * self.threshold_mem) def calculate_weight(self, compute_resource, total_cores_used, total_disk_used, total_memory_used): """Calculate weight of every resource :param compute_resource: node or instance object exposing vcpus, disk and memory capacity :param total_cores_used: number of cores used on the resource :param total_disk_used: disk size used on the resource :param total_memory_used: memory used on the resource :return: the averaged utilization score of the resource """ cpu_capacity = compute_resource.vcpus disk_capacity = compute_resource.disk memory_capacity = compute_resource.memory score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) / float(cpu_capacity)) # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0 if disk_capacity == 0: score_disk = 0 else: score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) / float(disk_capacity)) score_memory = ( 1 - (float(memory_capacity) - float(total_memory_used)) / float(memory_capacity)) # TODO(jed): take weights into account return (score_cores + score_disk + score_memory) / 3 def get_compute_node_cpu_usage(self, compute_node): return self.datasource_backend.get_host_cpu_usage( compute_node, self.period, self.aggregation_method['compute_node'], self.granularity) def get_instance_cpu_usage(self, instance): return self.datasource_backend.get_instance_cpu_usage( instance, self.period, self.aggregation_method['instance'], self.granularity) def calculate_score_node(self, node): """Calculate the score that represents the utilization level :param node: :py:class:`~.ComputeNode` instance :return: Score for the given compute node :rtype: float """ host_avg_cpu_util = self.get_compute_node_cpu_usage(node) if host_avg_cpu_util is None: resource_id = "%s_%s" % (node.uuid, node.hostname) LOG.error( "No values returned by %(resource_id)s " "for %(metric_name)s", dict( resource_id=resource_id, metric_name='host_cpu_usage')) host_avg_cpu_util = 100 total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0) return self.calculate_weight(node, total_cores_used, 0, 0) def calculate_score_instance(self, instance): """Calculate Score of virtual machine :param instance: the virtual machine :return: score """ instance_cpu_utilization = self.get_instance_cpu_usage(instance) if instance_cpu_utilization is None: LOG.error( "No values returned by %(resource_id)s " "for %(metric_name)s", dict( resource_id=instance.uuid, metric_name='instance_cpu_usage')) instance_cpu_utilization = 100 total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0) return self.calculate_weight(instance, total_cores_used, 0, 0) def add_action_disable_node(self, node): parameters = {'state': element.ServiceState.DISABLED.value, 'disabled_reason': self.REASON_FOR_DISABLE, 'resource_name': node.hostname} self.solution.add_action(action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=parameters) def compute_score_of_nodes(self): """Calculate score of nodes based on load by VMs""" score = [] for node in self.get_available_compute_nodes().values(): if node.status == element.ServiceState.ENABLED.value: self.number_of_enabled_nodes += 1 instances =
self.compute_model.get_node_instances(node) if len(instances) > 0: result = self.calculate_score_node(node) score.append((node.uuid, result)) return score def node_and_instance_score(self, sorted_scores): """Get List of VMs from node""" node_to_release = sorted_scores[len(sorted_scores) - 1][0] instances = self.compute_model.get_node_instances( self.compute_model.get_node_by_uuid(node_to_release)) instances_to_migrate = self.filter_instances_by_audit_tag(instances) instance_score = [] for instance in instances_to_migrate: if instance.state == element.InstanceState.ACTIVE.value: instance_score.append( (instance, self.calculate_score_instance(instance))) return node_to_release, instance_score def create_migration_instance(self, mig_instance, mig_source_node, mig_destination_node): """Create migration VM""" if self.compute_model.migrate_instance( mig_instance, mig_source_node, mig_destination_node): self.add_action_migrate(mig_instance, 'live', mig_source_node, mig_destination_node) if len(self.compute_model.get_node_instances(mig_source_node)) == 0: self.add_action_disable_node(mig_source_node) self.number_of_released_nodes += 1 def calculate_num_migrations(self, sorted_instances, node_to_release, sorted_score): number_migrations = 0 for mig_instance, __ in sorted_instances: # skip exclude instance when migrating if mig_instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", mig_instance.uuid) continue for node_uuid, __ in sorted_score: mig_source_node = self.compute_model.get_node_by_uuid( node_to_release) mig_destination_node = self.compute_model.get_node_by_uuid( node_uuid) result = self.check_migration( mig_source_node, mig_destination_node, mig_instance) if result: self.create_migration_instance( mig_instance, mig_source_node, mig_destination_node) number_migrations += 1 break return number_migrations def unsuccessful_migration_actualization(self, number_migrations, unsuccessful_migration): if number_migrations > 0: self.number_of_migrations += number_migrations return 0 else: return unsuccessful_migration + 1 def pre_execute(self): self._pre_execute() # backwards compatibility for node parameter. 
if self.aggregation_method['node'] != '': LOG.warning('Parameter node has been renamed to compute_node and ' 'will be removed in the next release.') self.aggregation_method['compute_node'] = \ self.aggregation_method['node'] def do_execute(self, audit=None): unsuccessful_migration = 0 scores = self.compute_score_of_nodes() # Sort compute nodes by Score decreasing sorted_scores = sorted(scores, reverse=True, key=lambda x: (x[1])) LOG.debug("Compute node(s) BFD %s", sorted_scores) # Get Node to be released if len(scores) == 0: LOG.warning( "The workloads of the compute nodes" " of the cluster are zero") return while sorted_scores and ( not self.migration_attempts or self.migration_attempts >= unsuccessful_migration): node_to_release, instance_score = self.node_and_instance_score( sorted_scores) # Sort instances by Score sorted_instances = sorted( instance_score, reverse=True, key=lambda x: (x[1])) # BFD: Best Fit Decrease LOG.debug("Instance(s) BFD %s", sorted_instances) migrations = self.calculate_num_migrations( sorted_instances, node_to_release, sorted_scores) unsuccessful_migration = self.unsuccessful_migration_actualization( migrations, unsuccessful_migration) if not migrations: # We don't have any possible migrations to perform on this # node, so we discard it and try to migrate instances from # the next one in the list sorted_scores.pop() infos = { "compute_nodes_count": self.number_of_enabled_nodes, "released_compute_nodes_count": self.number_of_released_nodes, "instance_migrations_count": self.number_of_migrations, "efficacy": self.efficacy } LOG.debug(infos) def post_execute(self): self.solution.set_efficacy_indicators( compute_nodes_count=self.number_of_enabled_nodes, released_compute_nodes_count=self.number_of_released_nodes, instance_migrations_count=self.number_of_migrations, ) LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/dummy_strategy.py0000664000175000017500000000545200000000000031105 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class DummyStrategy(base.DummyBaseStrategy): """Dummy strategy used for integration testing via Tempest *Description* This strategy does not provide any useful optimization. Its only purpose is to be used by Tempest tests. *Requirements* *Limitations* Do not use in production.
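*Sample parameters* An audit parameters payload matching the schema defined below (values are the documented defaults)::

    {"para1": 3.2, "para2": "hello"}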
*Spec URL* """ NOP = "nop" SLEEP = "sleep" def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): para1 = self.input_parameters.para1 para2 = self.input_parameters.para2 LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", {'p1': para1, 'p2': para2}) parameters = {'message': 'hello World'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) parameters = {'message': para2} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) self.solution.add_action(action_type=self.SLEEP, input_parameters={'duration': para1}) def post_execute(self): pass @classmethod def get_name(cls): return "dummy" @classmethod def get_display_name(cls): return _("Dummy strategy") @classmethod def get_translatable_display_name(cls): return "Dummy strategy" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "para1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 10.2, }, "para2": { "description": "string parameter example", "type": "string", "default": "hello" }, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/dummy_with_resize.py0000664000175000017500000000702100000000000031571 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class DummyWithResize(base.DummyBaseStrategy): """Dummy strategy used for integration testing via Tempest *Description* This strategy does not provide any useful optimization. Its only purpose is to be used by Tempest tests. *Requirements* *Limitations* Do not use in production. 
*Spec URL* """ NOP = "nop" SLEEP = "sleep" def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): para1 = self.input_parameters.para1 para2 = self.input_parameters.para2 LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", {'p1': para1, 'p2': para2}) parameters = {'message': 'hello World'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) parameters = {'message': 'Welcome'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) self.solution.add_action(action_type=self.SLEEP, input_parameters={'duration': 5.0}) self.solution.add_action( action_type='migrate', resource_id='b199db0c-1408-4d52-b5a5-5ca14de0ff36', input_parameters={ 'source_node': 'compute2', 'destination_node': 'compute3', 'migration_type': 'live'}) self.solution.add_action( action_type='migrate', resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', input_parameters={ 'source_node': 'compute2', 'destination_node': 'compute3', 'migration_type': 'live'} ) self.solution.add_action( action_type='resize', resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', input_parameters={'flavor': 'x2'} ) def post_execute(self): pass @classmethod def get_name(cls): return "dummy_with_resize" @classmethod def get_display_name(cls): return _("Dummy strategy with resize") @classmethod def get_translatable_display_name(cls): return "Dummy strategy with resize" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "para1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 10.2, }, "para2": { "description": "string parameter example", "type": "string", "default": "hello" }, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py0000664000175000017500000001417200000000000031572 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import random from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import units from watcher._i18n import _ from watcher.decision_engine.scoring import scoring_factory from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class DummyWithScorer(base.DummyBaseStrategy): """A dummy strategy using dummy scoring engines. This is a dummy strategy demonstrating how to work with scoring engines. One scoring engine is predicting the workload type of a machine based on the telemetry data, the other one is simply calculating the average value for given elements in a list. Results are then passed to the NOP action. 
The strategy is presenting the whole workflow: - Get a reference to a scoring engine - Prepare input data (features) for score calculation - Perform score calculation - Use scorer's metadata for results interpretation """ DEFAULT_NAME = "dummy_with_scorer" DEFAULT_DESCRIPTION = "Dummy Strategy with Scorer" NOP = "nop" SLEEP = "sleep" def __init__(self, config, osc=None): """Constructor: the signature should be identical within the subclasses :param config: Configuration related to this plugin :type config: :py:class:`~.Struct` :param osc: An OpenStackClients instance :type osc: :py:class:`~.OpenStackClients` instance """ super(DummyWithScorer, self).__init__(config, osc) # Setup Scoring Engines self._workload_scorer = (scoring_factory .get_scoring_engine('dummy_scorer')) self._avg_scorer = (scoring_factory .get_scoring_engine('dummy_avg_scorer')) # Get metainfo from Workload Scorer for result interpretation metainfo = jsonutils.loads(self._workload_scorer.get_metainfo()) self._workloads = {index: workload for index, workload in enumerate( metainfo['workloads'])} def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): # Simple "hello world" from strategy param1 = self.input_parameters.param1 param2 = self.input_parameters.param2 LOG.debug('DummyWithScorer params: param1=%(p1)f, param2=%(p2)s', {'p1': param1, 'p2': param2}) parameters = {'message': 'Hello from Dummy Strategy with Scorer!'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) # Demonstrate workload scorer features = self._generate_random_telemetry() result_str = self._workload_scorer.calculate_score(features) LOG.debug('Workload Scorer result: %s', result_str) # Parse the result using workloads from scorer's metainfo result = self._workloads[jsonutils.loads(result_str)[0]] LOG.debug('Detected Workload: %s', result) parameters = {'message': 'Detected Workload: %s' % result} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) # Demonstrate AVG scorer features = jsonutils.dumps(random.sample(range(1000), 20)) result_str = self._avg_scorer.calculate_score(features) LOG.debug('AVG Scorer result: %s', result_str) result = jsonutils.loads(result_str)[0] LOG.debug('AVG Scorer result (parsed): %d', result) parameters = {'message': 'AVG Scorer result: %s' % result} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) # Sleep action self.solution.add_action(action_type=self.SLEEP, input_parameters={'duration': 5.0}) def post_execute(self): pass @classmethod def get_name(cls): return 'dummy_with_scorer' @classmethod def get_display_name(cls): return _('Dummy Strategy using sample Scoring Engines') @classmethod def get_translatable_display_name(cls): return 'Dummy Strategy using sample Scoring Engines' @classmethod def get_schema(cls): # Mandatory default setting for each element return { 'properties': { 'param1': { 'description': 'number parameter example', 'type': 'number', 'default': 3.2, 'minimum': 1.0, 'maximum': 10.2, }, 'param2': { 'description': 'string parameter example', 'type': "string", 'default': "hello" }, }, } def _generate_random_telemetry(self): processor_time = random.randint(0, 100) mem_total_bytes = 4*units.Gi mem_avail_bytes = random.randint(1*units.Gi, 4*units.Gi) mem_page_reads = random.randint(0, 2000) mem_page_writes = random.randint(0, 2000) disk_read_bytes = random.randint(0*units.Mi, 200*units.Mi) disk_write_bytes = random.randint(0*units.Mi, 200*units.Mi) net_bytes_received = random.randint(0*units.Mi, 20*units.Mi) 
net_bytes_sent = random.randint(0*units.Mi, 10*units.Mi) return jsonutils.dumps([ processor_time, mem_total_bytes, mem_avail_bytes, mem_page_reads, mem_page_writes, disk_read_bytes, disk_write_bytes, net_bytes_received, net_bytes_sent]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/host_maintenance.py0000664000175000017500000002643500000000000031343 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 chinac.com # # Authors: suzhengwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class HostMaintenance(base.HostMaintenanceBaseStrategy): """[PoC]Host Maintenance *Description* It is a migration strategy for maintaining a single compute node without interrupting the user's applications. If a backup node is given, the strategy will first migrate all instances from the maintenance node to the backup node. If the backup node is not provided, it will migrate all instances, relying on nova-scheduler. *Requirements* * You must have at least 2 physical compute nodes to run this strategy. *Limitations* - This is a proof of concept that is not meant to be used in production - It migrates all instances from one host to other hosts. It's better to execute this strategy when the load is not heavy, and to use this algorithm with a `ONESHOT` audit. - It assumes that cold and live migrations are possible.
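For illustration, an audit for this strategy might be created with input parameters such as the following (hypothetical host names; per the parameter schema below, ``maintenance_node`` is required and ``backup_node`` is optional): {"maintenance_node": "compute01", "backup_node": "compute02"}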
""" INSTANCE_MIGRATION = "migrate" CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" REASON_FOR_DISABLE = 'watcher_disabled' def __init__(self, config, osc=None): super(HostMaintenance, self).__init__(config, osc) @classmethod def get_name(cls): return "host_maintenance" @classmethod def get_display_name(cls): return _("Host Maintenance Strategy") @classmethod def get_translatable_display_name(cls): return "Host Maintenance Strategy" @classmethod def get_schema(cls): return { "properties": { "maintenance_node": { "description": "The name of the compute node which " "need maintenance", "type": "string", }, "backup_node": { "description": "The name of the compute node which " "will backup the maintenance node.", "type": "string", }, }, "required": ["maintenance_node"], } def get_disabled_compute_nodes_with_reason(self, reason=None): return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status == element.ServiceState.DISABLED.value and cn.disabled_reason == reason} def get_disabled_compute_nodes(self): return self.get_disabled_compute_nodes_with_reason( self.REASON_FOR_DISABLE) def get_instance_state_str(self, instance): """Get instance state in string format""" if isinstance(instance.state, str): return instance.state elif isinstance(instance.state, element.InstanceState): return instance.state.value else: LOG.error('Unexpected instance state type, ' 'state=%(state)s, state_type=%(st)s.', dict(state=instance.state, st=type(instance.state))) raise exception.WatcherException def get_node_status_str(self, node): """Get node status in string format""" if isinstance(node.status, str): return node.status elif isinstance(node.status, element.ServiceState): return node.status.value else: LOG.error('Unexpected node status type, ' 'status=%(status)s, status_type=%(st)s.', dict(status=node.status, st=type(node.status))) raise exception.WatcherException def get_node_capacity(self, node): """Collect cpu, ram and disk capacity of a node. :param node: node object :return: dict(cpu(cores), ram(MB), disk(B)) """ return dict(cpu=node.vcpu_capacity, ram=node.memory_mb_capacity, disk=node.disk_gb_capacity) def host_fits(self, source_node, destination_node): """check host fits return True if VMs could intensively migrate from source_node to destination_node. 
""" source_node_used = self.compute_model.get_node_used_resources( source_node) destination_node_free = self.compute_model.get_node_free_resources( destination_node) metrics = ['vcpu', 'memory'] for m in metrics: if source_node_used[m] > destination_node_free[m]: return False return True def add_action_enable_compute_node(self, node): """Add an action for node enabler into the solution.""" params = {'state': element.ServiceState.ENABLED.value, 'resource_name': node.hostname} self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=params) def add_action_maintain_compute_node(self, node): """Add an action for node maintenance into the solution.""" params = {'state': element.ServiceState.DISABLED.value, 'disabled_reason': self.REASON_FOR_MAINTAINING, 'resource_name': node.hostname} self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=params) def enable_compute_node_if_disabled(self, node): node_status_str = self.get_node_status_str(node) if node_status_str != element.ServiceState.ENABLED.value: self.add_action_enable_compute_node(node) def instance_migration(self, instance, src_node, des_node=None): """Add an action for instance migration into the solution. :param instance: instance object :param src_node: node object :param des_node: node object. if None, the instance will be migrated relying on nova-scheduler :return: None """ instance_state_str = self.get_instance_state_str(instance) if instance_state_str == element.InstanceState.ACTIVE.value: migration_type = 'live' else: migration_type = 'cold' params = {'migration_type': migration_type, 'source_node': src_node.uuid, 'resource_name': instance.name} if des_node: params['destination_node'] = des_node.uuid self.solution.add_action(action_type=self.INSTANCE_MIGRATION, resource_id=instance.uuid, input_parameters=params) def host_migration(self, source_node, destination_node): """host migration Migrate all instances from source_node to destination_node. Active instances use "live-migrate", and other instances use "cold-migrate" """ instances = self.compute_model.get_node_instances(source_node) for instance in instances: self.instance_migration(instance, source_node, destination_node) def safe_maintain(self, maintenance_node, backup_node=None): """safe maintain one compute node Migrate all instances of the maintenance_node intensively to the backup host. If the user didn't give the backup host, it will select one unused node to backup the maintaining node. It calculate the resource both of the backup node and maintaining node to evaluate the migrations from maintaining node to backup node. If all instances of the maintaining node can migrated to the backup node, it will set the maintaining node in 'watcher_maintaining' status, and add the migrations to solution. """ # If the user gives a backup node with required capacity, then migrates # all instances from the maintaining node to the backup node. if backup_node: if self.host_fits(maintenance_node, backup_node): self.enable_compute_node_if_disabled(backup_node) self.add_action_maintain_compute_node(maintenance_node) self.host_migration(maintenance_node, backup_node) return True # If the user didn't give the backup host, select one unused # node with required capacity, then migrates all instances # from maintaining node to it. 
nodes = sorted( self.get_disabled_compute_nodes().values(), key=lambda x: self.get_node_capacity(x)['cpu']) if maintenance_node in nodes: nodes.remove(maintenance_node) for node in nodes: if self.host_fits(maintenance_node, node): self.enable_compute_node_if_disabled(node) self.add_action_maintain_compute_node(maintenance_node) self.host_migration(maintenance_node, node) return True return False def try_maintain(self, maintenance_node): """try to maintain one compute node It first sets the maintenance_node to the 'watcher_maintaining' status, then tries to migrate all instances off the maintenance node, relying on nova-scheduler. """ self.add_action_maintain_compute_node(maintenance_node) instances = self.compute_model.get_node_instances(maintenance_node) for instance in instances: self.instance_migration(instance, maintenance_node) def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): LOG.info(_('Executing Host Maintenance Migration Strategy')) maintenance_node = self.input_parameters.get('maintenance_node') backup_node = self.input_parameters.get('backup_node') # if there are no VMs on the maintenance_node, just mark the # compute node for maintenance src_node = self.compute_model.get_node_by_name(maintenance_node) if len(self.compute_model.get_node_instances(src_node)) == 0: if (src_node.disabled_reason != self.REASON_FOR_MAINTAINING): self.add_action_maintain_compute_node(src_node) return if backup_node: des_node = self.compute_model.get_node_by_name(backup_node) else: des_node = None if not self.safe_maintain(src_node, des_node): self.try_maintain(src_node) def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ LOG.debug(self.solution.actions) LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/node_resource_consolidation.py0000664000175000017500000002603400000000000033610 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base from watcher import objects LOG = log.getLogger(__name__) class NodeResourceConsolidation(base.ServerConsolidationBaseStrategy): """Consolidate resources on nodes using server migration *Description* This strategy checks the resource usage of compute nodes; if the used resources are less than the total, it will try to migrate servers to consolidate resource usage. *Requirements* * You must have at least 2 compute nodes to run this strategy.
* Hardware: compute nodes should use the same physical CPUs/RAMs *Limitations* * This is a proof of concept that is not meant to be used in production * It assume that live migrations are possible *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/train/implemented/node-resource-consolidation.html """ CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" REASON_FOR_DISABLE = 'Watcher node resource consolidation strategy' def __init__(self, config, osc=None): """node resource consolidation :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(NodeResourceConsolidation, self).__init__(config, osc) self.host_choice = 'auto' self.audit = None self.compute_nodes_count = 0 self.number_of_released_nodes = 0 self.number_of_migrations = 0 @classmethod def get_name(cls): return "node_resource_consolidation" @classmethod def get_display_name(cls): return _("Node Resource Consolidation strategy") @classmethod def get_translatable_display_name(cls): return "Node Resource Consolidation strategy" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "host_choice": { "description": "The way to select the server migration " "destination node. The value 'auto' " "means that Nova scheduler selects " "the destination node, and 'specify' " "means the strategy specifies the " "destination.", "type": "string", "default": 'auto' }, }, } def check_resources(self, servers, destination): # check whether a node able to accommodate a VM dest_flag = False if not destination: return dest_flag free_res = self.compute_model.get_node_free_resources(destination) for server in servers: # just vcpu and memory, do not consider disk if free_res['vcpu'] >= server.vcpus and ( free_res['memory'] >= server.memory): free_res['vcpu'] -= server.vcpus free_res['memory'] -= server.memory dest_flag = True servers.remove(server) return dest_flag def select_destination(self, server, source, destinations): dest_node = None if not destinations: return dest_node sorted_nodes = sorted( destinations, key=lambda x: self.compute_model.get_node_free_resources( x)['vcpu']) for dest in sorted_nodes: if self.check_resources([server], dest): if self.compute_model.migrate_instance(server, source, dest): dest_node = dest break return dest_node def add_migrate_actions(self, sources, destinations): if not sources or not destinations: return for node in sources: servers = self.compute_model.get_node_instances(node) sorted_servers = sorted( servers, key=lambda x: x.vcpus, reverse=True) for server in sorted_servers: parameters = {'migration_type': 'live', 'source_node': node.hostname, 'resource_name': server.name} action_flag = False if self.host_choice != 'auto': # specify destination host dest = self.select_destination(server, node, destinations) if dest: parameters['destination_node'] = dest.hostname action_flag = True else: action_flag = True if action_flag: self.number_of_migrations += 1 self.solution.add_action( action_type=self.MIGRATION, resource_id=server.uuid, input_parameters=parameters) def add_change_node_state_actions(self, nodes, status): if status not in (element.ServiceState.DISABLED.value, element.ServiceState.ENABLED.value): raise exception.IllegalArgumentException( message=_("The node status is not defined")) changed_nodes = [] for node in nodes: if node.status != status: parameters = {'state': status, 'resource_name': node.hostname} if status == 
element.ServiceState.DISABLED.value: parameters['disabled_reason'] = self.REASON_FOR_DISABLE self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=parameters) node.status = status changed_nodes.append(node) return changed_nodes def get_nodes_migrate_failed(self): # check whether any migration action has ever failed; # only relevant for continuous audits nodes_failed = [] if self.audit is None or ( self.audit.audit_type == objects.audit.AuditType.ONESHOT.value): return nodes_failed filters = {'audit_uuid': self.audit.uuid} actions = objects.action.Action.list( self.ctx, filters=filters) for action in actions: if action.state == objects.action.State.FAILED and ( action.action_type == self.MIGRATION): server_uuid = action.input_parameters.get('resource_id') node = self.compute_model.get_node_by_instance_uuid( server_uuid) if node not in nodes_failed: nodes_failed.append(node) return nodes_failed def group_nodes(self, nodes): free_nodes = [] source_nodes = [] dest_nodes = [] nodes_failed = self.get_nodes_migrate_failed() LOG.info("Nodes with failed migrations: %s", nodes_failed) sorted_nodes = sorted( nodes, key=lambda x: self.compute_model.get_node_used_resources( x)['vcpu']) for node in sorted_nodes: if node in dest_nodes: break # If a migration from this node ever failed, do not try to # migrate its instances again if node in nodes_failed: # it may still be usable as a destination node if node.status == element.ServiceState.ENABLED.value: dest_nodes.append(node) continue used_resource = self.compute_model.get_node_used_resources(node) if used_resource['vcpu'] > 0: servers = self.compute_model.get_node_instances(node) for dest in reversed(sorted_nodes): # skip the compute node if it is disabled if dest.status == element.ServiceState.DISABLED.value: LOG.info("node %s is disabled", dest.hostname) continue if dest in dest_nodes: continue if node == dest: # the last one serves as the destination node dest_nodes.append(dest) break if self.check_resources(servers, dest): dest_nodes.append(dest) if node not in source_nodes: source_nodes.append(node) if not servers: break else: free_nodes.append(node) return free_nodes, source_nodes, dest_nodes def pre_execute(self): self._pre_execute() self.host_choice = self.input_parameters.get('host_choice', 'auto') self.planner = 'node_resource_consolidation' def do_execute(self, audit=None): """Strategy execution phase Executing the strategy and creating the solution.
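In outline (a simplified sketch of the code below): group all compute nodes into free, source and destination sets via group_nodes(); when host_choice is 'auto', first disable the free and source nodes so that the Nova scheduler does not place new servers on them; then add the migration actions; finally re-enable the nodes that were disabled.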
""" self.audit = audit nodes = list(self.compute_model.get_all_compute_nodes().values()) free_nodes, source_nodes, dest_nodes = self.group_nodes(nodes) self.compute_nodes_count = len(nodes) self.number_of_released_nodes = len(source_nodes) LOG.info("Free nodes: %s", free_nodes) LOG.info("Source nodes: %s", source_nodes) LOG.info("Destination nodes: %s", dest_nodes) if not source_nodes: LOG.info("No compute node needs to be consolidated") return nodes_disabled = [] if self.host_choice == 'auto': # disable compute node to avoid to be select by Nova scheduler nodes_disabled = self.add_change_node_state_actions( free_nodes+source_nodes, element.ServiceState.DISABLED.value) self.add_migrate_actions(source_nodes, dest_nodes) if nodes_disabled: # restore disabled compute node after migration self.add_change_node_state_actions( nodes_disabled, element.ServiceState.ENABLED.value) def post_execute(self): """Post-execution phase """ self.solution.set_efficacy_indicators( compute_nodes_count=self.compute_nodes_count, released_compute_nodes_count=self.number_of_released_nodes, instance_migrations_count=self.number_of_migrations, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/noisy_neighbor.py0000664000175000017500000002331000000000000031037 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) CONF = cfg.CONF class NoisyNeighbor(base.NoisyNeighborBaseStrategy): """Noisy Neighbor strategy using live migration *Description* This strategy can identify and migrate a Noisy Neighbor - a low priority VM that negatively affects performance of a high priority VM in terms of IPC by over utilizing Last Level Cache. *Requirements* To enable LLC metric, latest Intel server with CMT support is required. 
*Limitations* This is a proof of concept that is not meant to be used in production *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/noisy_neighbor_strategy.html """ MIGRATION = "migrate" DATASOURCE_METRICS = ['instance_l3_cache_usage'] DEFAULT_WATCHER_PRIORITY = 5 def __init__(self, config, osc=None): super(NoisyNeighbor, self).__init__(config, osc) self.meter_name = 'instance_l3_cache_usage' @classmethod def get_name(cls): return "noisy_neighbor" @classmethod def get_display_name(cls): return _("Noisy Neighbor") @classmethod def get_translatable_display_name(cls): return "Noisy Neighbor" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "cache_threshold": { "description": "Performance drop in L3_cache threshold " "for migration", "type": "number", "default": 35.0 }, "period": { "description": "Aggregate time period of " "ceilometer and gnocchi", "type": "number", "default": 100.0 }, }, } def get_current_and_previous_cache(self, instance): try: curr_cache = self.datasource_backend.get_instance_l3_cache_usage( instance, self.meter_name, self.period, 'mean', granularity=300) previous_cache = 2 * ( self.datasource_backend.get_instance_l3_cache_usage( instance, self.meter_name, 2 * self.period, 'mean', granularity=300)) - curr_cache except Exception as exc: LOG.exception(exc) return None, None return curr_cache, previous_cache def find_priority_instance(self, instance): current_cache, previous_cache = \ self.get_current_and_previous_cache(instance) if None in (current_cache, previous_cache): LOG.warning("Datasource unable to pick L3 Cache " "values. Skipping the instance") return None if (current_cache < (1 - (self.cache_threshold / 100.0)) * previous_cache): return instance else: return None def find_noisy_instance(self, instance): noisy_current_cache, noisy_previous_cache = \ self.get_current_and_previous_cache(instance) if None in (noisy_current_cache, noisy_previous_cache): LOG.warning("Datasource unable to pick " "L3 Cache. Skipping the instance") return None if (noisy_current_cache > (1 + (self.cache_threshold / 100.0)) * noisy_previous_cache): return instance else: return None def group_hosts(self): nodes = self.compute_model.get_all_compute_nodes() hosts_need_release = {} hosts_target = [] for node in nodes.values(): instances_of_node = self.compute_model.get_node_instances(node) node_instance_count = len(instances_of_node) # Flag that tells us whether to skip the node or not. If True, # the node is skipped. Will be true if we find a noisy instance or # when potential priority instance will be same as potential noisy # instance loop_break_flag = False if node_instance_count > 1: instance_priority_list = [] for instance in instances_of_node: instance_priority_list.append(instance) # If there is no metadata regarding watcher-priority, it takes # DEFAULT_WATCHER_PRIORITY as priority. 
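# NOTE: candidate priority VMs are scanned in sort order, while the # reversed copy below is used to scan candidate noisy VMs from the # other end of the ordering; the search stops once the two scans meet.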
instance_priority_list.sort(key=lambda a: ( a.get('metadata').get('watcher-priority'), self.DEFAULT_WATCHER_PRIORITY)) instance_priority_list_reverse = list(instance_priority_list) instance_priority_list_reverse.reverse() for potential_priority_instance in instance_priority_list: priority_instance = self.find_priority_instance( potential_priority_instance) if (priority_instance is not None): for potential_noisy_instance in ( instance_priority_list_reverse): if (potential_noisy_instance == potential_priority_instance): loop_break_flag = True break noisy_instance = self.find_noisy_instance( potential_noisy_instance) if noisy_instance is not None: hosts_need_release[node.uuid] = { 'priority_vm': potential_priority_instance, 'noisy_vm': potential_noisy_instance} LOG.debug("Priority VM found: %s", potential_priority_instance.uuid) LOG.debug("Noisy VM found: %s", potential_noisy_instance.uuid) loop_break_flag = True break # No need to check other instances in the node if loop_break_flag is True: break if node.uuid not in hosts_need_release: hosts_target.append(node) return hosts_need_release, hosts_target def filter_dest_servers(self, hosts, instance_to_migrate): required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_memory = instance_to_migrate.memory dest_servers = [] for host in hosts: free_res = self.compute_model.get_node_free_resources(host) if (free_res['vcpu'] >= required_cores and free_res['disk'] >= required_disk and free_res['memory'] >= required_memory): dest_servers.append(host) return dest_servers def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): self.cache_threshold = self.input_parameters.cache_threshold self.period = self.input_parameters.period hosts_need_release, hosts_target = self.group_hosts() if len(hosts_need_release) == 0: LOG.debug("No hosts require optimization") return if len(hosts_target) == 0: LOG.debug("No hosts available to migrate") return mig_source_node_name = max(hosts_need_release.keys(), key=lambda a: hosts_need_release[a]['priority_vm']) instance_to_migrate = hosts_need_release[mig_source_node_name][ 'noisy_vm'] if instance_to_migrate is None: return dest_servers = self.filter_dest_servers(hosts_target, instance_to_migrate) if len(dest_servers) == 0: LOG.info("No proper target host could be found") return # Destination node will be the first available node in the list. mig_destination_node = dest_servers[0] mig_source_node = self.compute_model.get_node_by_uuid( mig_source_node_name) if self.compute_model.migrate_instance(instance_to_migrate, mig_source_node, mig_destination_node): parameters = {'migration_type': 'live', 'source_node': mig_source_node.uuid, 'destination_node': mig_destination_node.uuid, 'resource_name': instance_to_migrate.name} self.solution.add_action(action_type=self.MIGRATION, resource_id=instance_to_migrate.uuid, input_parameters=parameters) def post_execute(self): self.solution.model = self.compute_model LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/outlet_temp_control.py0000664000175000017500000002540200000000000032126 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ *Good Thermal Strategy* As infrastructure becomes software defined, power and thermal intelligence is being adopted to optimize workloads; this can help improve efficiency and reduce power consumption, as well as improve datacenter PUE and lower operating costs in the data center. Outlet (exhaust air) temperature is one of the important thermal telemetry values used to measure the thermal/workload status of a server. This strategy makes decisions to migrate workloads to the hosts with a good thermal condition (lowest outlet temperature) when the outlet temperature of the source hosts reaches a configurable threshold. """ from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class OutletTempControl(base.ThermalOptimizationBaseStrategy): """[PoC] Outlet temperature control using live migration *Description* It is a migration strategy based on the outlet temperature of compute hosts. It generates solutions to move a workload whenever a server's outlet temperature is higher than the specified threshold. *Requirements* * Hardware: all compute hosts should support IPMI and PTAS technology * Software: the Ceilometer component ceilometer-agent-ipmi must be running on each compute host, and the Ceilometer API must be able to report the ``hardware.ipmi.node.outlet_temperature`` telemetry successfully. * You must have at least 2 physical compute hosts to run this strategy. *Limitations* - This is a proof of concept that is not meant to be used in production - We cannot forecast how many servers should be migrated. This is the reason why we only plan a single virtual machine migration at a time. So it's better to use this algorithm with `CONTINUOUS` audits.
- It assume that live migrations are possible *Spec URL* https://github.com/openstack/watcher-specs/blob/master/specs/mitaka/implemented/outlet-temperature-based-strategy.rst """ # The meter to report outlet temperature in ceilometer MIGRATION = "migrate" DATASOURCE_METRICS = ['host_outlet_temp'] def __init__(self, config, osc=None): """Outlet temperature control using live migration :param config: A mapping containing the configuration of this strategy :type config: dict :param osc: an OpenStackClients object, defaults to None :type osc: :py:class:`~.OpenStackClients` instance, optional """ super(OutletTempControl, self).__init__(config, osc) @classmethod def get_name(cls): return "outlet_temperature" @classmethod def get_display_name(cls): return _("Outlet temperature based strategy") @classmethod def get_translatable_display_name(cls): return "Outlet temperature based strategy" @property def period(self): return self.input_parameters.get('period', 30) @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "threshold": { "description": "temperature threshold for migration", "type": "number", "default": 35.0 }, "period": { "description": "The time interval in seconds for " "getting statistic aggregation", "type": "number", "default": 30 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, }, } @property def granularity(self): return self.input_parameters.get('granularity', 300) def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def group_hosts_by_outlet_temp(self): """Group hosts based on outlet temp meters""" nodes = self.get_available_compute_nodes() hosts_need_release = [] hosts_target = [] metric_name = 'host_outlet_temp' for node in nodes.values(): outlet_temp = None outlet_temp = self.datasource_backend.statistic_aggregation( resource=node, resource_type='compute_node', meter_name=metric_name, period=self.period, granularity=self.granularity, ) # some hosts may not have outlet temp meters, remove from target if outlet_temp is None: LOG.warning("%s: no outlet temp data", node.uuid) continue LOG.debug("%(resource)s: outlet temperature %(temp)f", {'resource': node.uuid, 'temp': outlet_temp}) instance_data = {'compute_node': node, 'outlet_temp': outlet_temp} if outlet_temp >= self.threshold: # mark the node to release resources hosts_need_release.append(instance_data) else: hosts_target.append(instance_data) return hosts_need_release, hosts_target def choose_instance_to_migrate(self, hosts): """Pick up an active instance to migrate from provided hosts""" for instance_data in hosts: mig_source_node = instance_data['compute_node'] instances_of_src = self.compute_model.get_node_instances( mig_source_node) for instance in instances_of_src: try: # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue # select the first active instance to migrate if (instance.state != element.InstanceState.ACTIVE.value): LOG.info("Instance not active, skipped: %s", instance.uuid) continue return mig_source_node, instance except exception.InstanceNotFound as e: LOG.exception(e) LOG.info("Instance not found") return None def filter_dest_servers(self, hosts, 
instance_to_migrate): """Only return hosts with sufficient available resources""" required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_memory = instance_to_migrate.memory # filter out nodes without enough resources dest_servers = [] for instance_data in hosts: host = instance_data['compute_node'] # available free_res = self.compute_model.get_node_free_resources(host) if (free_res['vcpu'] >= required_cores and free_res['disk'] >= required_disk and free_res['memory'] >= required_memory): dest_servers.append(instance_data) return dest_servers def pre_execute(self): self._pre_execute() # the migration plan will be triggered when the outlet temperature # reaches the threshold self.threshold = self.input_parameters.threshold LOG.info("Outlet temperature strategy threshold=%d", self.threshold) def do_execute(self, audit=None): hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp() if len(hosts_need_release) == 0: # TODO(zhenzanz): return something right if there's no hot servers LOG.debug("No hosts require optimization") return self.solution if len(hosts_target) == 0: LOG.warning("No hosts under outlet temp threshold found") return self.solution # choose the server with the highest outlet temperature hosts_need_release = sorted(hosts_need_release, reverse=True, key=lambda x: (x["outlet_temp"])) instance_to_migrate = self.choose_instance_to_migrate( hosts_need_release) # calculate the instance's CPU core, memory and disk needs if instance_to_migrate is None: return self.solution mig_source_node, instance_src = instance_to_migrate dest_servers = self.filter_dest_servers(hosts_target, instance_src) # sort the filtered result by outlet temp # pick up the lowest one as dest server if len(dest_servers) == 0: # TODO(zhenzanz): maybe warn that there's no resource # for the instance. LOG.info("No proper target host could be found") return self.solution dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) # always use the host with the lowest outlet temperature mig_destination_node = dest_servers[0]['compute_node'] # generate solution to migrate the instance to the dest server, if self.compute_model.migrate_instance( instance_src, mig_source_node, mig_destination_node): parameters = {'migration_type': 'live', 'source_node': mig_source_node.uuid, 'destination_node': mig_destination_node.uuid, 'resource_name': instance_src.name} self.solution.add_action(action_type=self.MIGRATION, resource_id=instance_src.uuid, input_parameters=parameters) def post_execute(self): self.solution.model = self.compute_model # TODO(v-francoise): Add the indicators to the solution LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/saving_energy.py0000664000175000017500000002170300000000000030665 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors: licanwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied.
# See the License for the specific language governing permissions and # limitations under the License. # import random from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.common.metal_helper import constants as metal_constants from watcher.common.metal_helper import factory as metal_helper_factory from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class SavingEnergy(base.SavingEnergyBaseStrategy): """Saving Energy Strategy *Description* The Saving Energy Strategy together with the VM Workload Consolidation Strategy can perform the Dynamic Power Management (DPM) functionality, which tries to save power by dynamically consolidating workloads even further during periods of low resource utilization. Virtual machines are migrated onto fewer hosts and the unneeded hosts are powered off. After consolidation, the Saving Energy Strategy produces a solution of powering hosts off/on according to the following policy: a preset number (min_free_hosts_num) is given by the user, describing the minimum number of free compute nodes that the user expects to have, where "free compute nodes" refers to nodes that are unused but still powered on. If the actual number of unused powered-on nodes is larger than the given number, the strategy randomly selects the redundant nodes and powers them off; if the actual number of unused powered-on nodes is smaller than the given number and there are spare unused powered-off nodes, it randomly selects some of those nodes and powers them on. *Requirements* In order to calculate the min_free_hosts_num, users must provide two parameters: * One parameter ("min_free_hosts_num") is a constant int number. This number should be of int type and larger than zero. * The other parameter ("free_used_percent") is a percentage number describing the quotient min_free_hosts_num/nodes_with_VMs_num, where nodes_with_VMs_num is the number of nodes with VMs running on them. This parameter is used to calculate a dynamic min_free_hosts_num. The larger of the two resulting values is chosen as the final min_free_hosts_num. *Limitations* * at least 2 physical compute hosts *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html """ def __init__(self, config, osc=None): super(SavingEnergy, self).__init__(config, osc) self._metal_helper = None self._nova_client = None self.with_vms_node_pool = [] self.free_poweron_node_pool = [] self.free_poweroff_node_pool = [] self.free_used_percent = 0 self.min_free_hosts_num = 1 @property def metal_helper(self): if not self._metal_helper: self._metal_helper = metal_helper_factory.get_helper(self.osc) return self._metal_helper @property def nova_client(self): if not self._nova_client: self._nova_client = self.osc.nova() return self._nova_client @classmethod def get_name(cls): return "saving_energy" @classmethod def get_display_name(cls): return _("Saving Energy Strategy") @classmethod def get_translatable_display_name(cls): return "Saving Energy Strategy" @classmethod def get_schema(cls): """Return a schema of the two input parameters The standby nodes refer to nodes that are unused but still powered on, kept ready to absorb a burst of new instances.
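For example, an audit might pass input parameters such as (illustrative values only): {"free_used_percent": 20.0, "min_free_hosts_num": 2}.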
""" return { "properties": { "free_used_percent": { "description": ("a rational number, which describes the" " quotient of" " min_free_hosts_num/nodes_with_VMs_num" " where nodes_with_VMs_num is the number" " of nodes with VMs"), "type": "number", "default": 10.0 }, "min_free_hosts_num": { "description": ("minimum number of hosts without VMs" " but still powered on"), "type": "number", "default": 1 }, }, } def add_action_poweronoff_node(self, node, state): """Add an action for node disability into the solution. :param node: node :param state: node power state, power on or power off :return: None """ params = {'state': state, 'resource_name': node.get_hypervisor_hostname()} self.solution.add_action( action_type='change_node_power_state', resource_id=node.get_id(), input_parameters=params) def get_hosts_pool(self): """Get three pools, with_vms_node_pool, free_poweron_node_pool, free_poweroff_node_pool. """ node_list = self.metal_helper.list_compute_nodes() for node in node_list: hypervisor_node = node.get_hypervisor_node().to_dict() compute_service = hypervisor_node.get('service', None) host_name = compute_service.get('host') LOG.debug("Found hypervisor: %s", hypervisor_node) try: self.compute_model.get_node_by_name(host_name) except exception.ComputeNodeNotFound: LOG.info("The compute model does not contain the host: %s", host_name) continue if (node.hv_up_when_powered_off and hypervisor_node.get('state') != 'up'): # filter nodes that are not in 'up' state LOG.info("Ignoring node that isn't in 'up' state: %s", host_name) continue else: if (hypervisor_node['running_vms'] == 0): power_state = node.get_power_state() if power_state == metal_constants.PowerState.ON: self.free_poweron_node_pool.append(node) elif power_state == metal_constants.PowerState.OFF: self.free_poweroff_node_pool.append(node) else: LOG.info("Ignoring node %s, unknown state: %s", node, power_state) else: self.with_vms_node_pool.append(node) def save_energy(self): need_poweron = int(max( (len(self.with_vms_node_pool) * self.free_used_percent / 100), ( self.min_free_hosts_num))) len_poweron = len(self.free_poweron_node_pool) len_poweroff = len(self.free_poweroff_node_pool) LOG.debug("need_poweron: %s, len_poweron: %s, len_poweroff: %s", need_poweron, len_poweron, len_poweroff) if len_poweron > need_poweron: for node in random.sample(self.free_poweron_node_pool, (len_poweron - need_poweron)): self.add_action_poweronoff_node(node, metal_constants.PowerState.OFF) LOG.info("power off %s", node.get_id()) elif len_poweron < need_poweron: diff = need_poweron - len_poweron for node in random.sample(self.free_poweroff_node_pool, min(len_poweroff, diff)): self.add_action_poweronoff_node(node, metal_constants.PowerState.ON) LOG.info("power on %s", node.get_id()) def pre_execute(self): self._pre_execute() self.free_used_percent = self.input_parameters.free_used_percent self.min_free_hosts_num = self.input_parameters.min_free_hosts_num def do_execute(self, audit=None): """Strategy execution phase This phase is where you should put the main logic of your strategy. 
""" self.get_hosts_pool() self.save_energy() def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.solution.model = self.compute_model LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/storage_capacity_balance.py0000664000175000017500000003646500000000000033026 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher._i18n import _ from watcher.common import cinder_helper from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class StorageCapacityBalance(base.WorkloadStabilizationBaseStrategy): """Storage capacity balance using cinder volume migration *Description* This strategy migrates volumes based on the workload of the cinder pools. It makes decision to migrate a volume whenever a pool's used utilization % is higher than the specified threshold. The volume to be moved should make the pool close to average workload of all cinder pools. *Requirements* * You must have at least 2 cinder volume pools to run this strategy. *Limitations* * Volume migration depends on the storage device. It may take a long time. *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/storage-capacity-balance.html """ def __init__(self, config, osc=None): """VolumeMigrate using cinder volume migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(StorageCapacityBalance, self).__init__(config, osc) self._cinder = None self.volume_threshold = 80.0 self.pool_type_cache = dict() self.source_pools = [] self.dest_pools = [] @property def cinder(self): if not self._cinder: self._cinder = cinder_helper.CinderHelper(osc=self.osc) return self._cinder @classmethod def get_name(cls): return "storage_capacity_balance" @classmethod def get_display_name(cls): return _("Storage Capacity Balance Strategy") @classmethod def get_translatable_display_name(cls): return "Storage Capacity Balance Strategy" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "volume_threshold": { "description": "volume threshold for capacity balance", "type": "number", "default": 80.0 }, }, } @classmethod def get_config_opts(cls): return super(StorageCapacityBalance, cls).get_config_opts() + [ cfg.ListOpt( "ex_pools", help="exclude pools", default=['local_vstorage']), ] def get_pools(self, cinder): """Get all volume pools excepting ex_pools. 
:param cinder: cinder client :return: volume pools """ ex_pools = self.config.ex_pools pools = cinder.get_storage_pool_list() filtered_pools = [p for p in pools if p.pool_name not in ex_pools] return filtered_pools def get_volumes(self, cinder): """Get all volumes with status in available or in-use and no snapshot. :param cinder: cinder client :return: all volumes """ all_volumes = cinder.get_volume_list() valid_status = ['in-use', 'available'] volume_snapshots = cinder.get_volume_snapshots_list() snapshot_volume_ids = [] for snapshot in volume_snapshots: snapshot_volume_ids.append(snapshot.volume_id) nosnap_volumes = list(filter(lambda v: v.id not in snapshot_volume_ids, all_volumes)) LOG.info("volumes in snap: %s", snapshot_volume_ids) status_volumes = list( filter(lambda v: v.status in valid_status, nosnap_volumes)) valid_volumes = [v for v in status_volumes if getattr(v, 'migration_status') == 'success' or getattr(v, 'migration_status') is None] LOG.info("valid volumes: %s", valid_volumes) return valid_volumes def group_pools(self, pools, threshold): """group volume pools by threshold. :param pools: all volume pools :param threshold: volume threshold :return: under and over threshold pools """ under_pools = list( filter(lambda p: float(p.total_capacity_gb) - float(p.free_capacity_gb) < float(p.total_capacity_gb) * threshold, pools)) over_pools = list( filter(lambda p: float(p.total_capacity_gb) - float(p.free_capacity_gb) >= float(p.total_capacity_gb) * threshold, pools)) return over_pools, under_pools def get_volume_type_by_name(self, cinder, backendname): # return list of pool type if backendname in self.pool_type_cache.keys(): return self.pool_type_cache.get(backendname) volume_type_list = cinder.get_volume_type_list() volume_type = list(filter( lambda volume_type: volume_type.extra_specs.get( 'volume_backend_name') == backendname, volume_type_list)) if volume_type: self.pool_type_cache[backendname] = volume_type return self.pool_type_cache.get(backendname) else: return [] def migrate_fit(self, volume, threshold): target_pool_name = None if volume.volume_type: LOG.info("volume %s type %s", volume.id, volume.volume_type) return target_pool_name self.dest_pools.sort( key=lambda p: float(p.free_capacity_gb) / float(p.total_capacity_gb)) for pool in reversed(self.dest_pools): total_cap = float(pool.total_capacity_gb) allocated = float(pool.allocated_capacity_gb) ratio = pool.max_over_subscription_ratio if total_cap * ratio < allocated + float(volume.size): LOG.info("pool %s allocated over", pool.name) continue free_cap = float(pool.free_capacity_gb) - float(volume.size) if free_cap > (1 - threshold) * total_cap: target_pool_name = pool.name index = self.dest_pools.index(pool) setattr(self.dest_pools[index], 'free_capacity_gb', str(free_cap)) LOG.info("volume: get pool %s for vol %s", target_pool_name, volume.name) break return target_pool_name def check_pool_type(self, volume, dest_pool): target_type = None src_extra_specs = {} # check type feature if not volume.volume_type: return target_type volume_type_list = self.cinder.get_volume_type_list() volume_type = list(filter( lambda volume_type: volume_type.name == volume.volume_type, volume_type_list)) if volume_type: src_extra_specs = volume_type[0].extra_specs src_extra_specs.pop('volume_backend_name', None) backendname = getattr(dest_pool, 'volume_backend_name') dst_pool_type = self.get_volume_type_by_name(self.cinder, backendname) for src_key in src_extra_specs.keys(): dst_pool_type = [pt for pt in dst_pool_type if 
pt.extra_specs.get(src_key) == src_extra_specs.get(src_key)] if dst_pool_type: if volume.volume_type: if dst_pool_type[0].name != volume.volume_type: target_type = dst_pool_type[0].name else: target_type = dst_pool_type[0].name return target_type def retype_fit(self, volume, threshold): target_type = None self.dest_pools.sort( key=lambda p: float(p.free_capacity_gb) / float(p.total_capacity_gb)) for pool in reversed(self.dest_pools): backendname = getattr(pool, 'volume_backend_name') pool_type = self.get_volume_type_by_name(self.cinder, backendname) LOG.info("volume: pool %s, type %s", pool.name, pool_type) if pool_type is None: continue total_cap = float(pool.total_capacity_gb) allocated = float(pool.allocated_capacity_gb) ratio = pool.max_over_subscription_ratio if total_cap * ratio < allocated + float(volume.size): LOG.info("pool %s allocated over", pool.name) continue free_cap = float(pool.free_capacity_gb) - float(volume.size) if free_cap > (1 - threshold) * total_cap: target_type = self.check_pool_type(volume, pool) if target_type is None: continue index = self.dest_pools.index(pool) setattr(self.dest_pools[index], 'free_capacity_gb', str(free_cap)) LOG.info("volume: get type %s for vol %s", target_type, volume.name) break return target_type def get_actions(self, pool, volumes, threshold): """get volume, pool key-value action return: retype, migrate dict """ retype_dicts = dict() migrate_dicts = dict() total_cap = float(pool.total_capacity_gb) used_cap = float(pool.total_capacity_gb) - float(pool.free_capacity_gb) seek_flag = True volumes_in_pool = list( filter(lambda v: getattr(v, 'os-vol-host-attr:host') == pool.name, volumes)) LOG.info("volumes in pool: %s", str(volumes_in_pool)) if not volumes_in_pool: return retype_dicts, migrate_dicts ava_volumes = list(filter(lambda v: v.status == 'available', volumes_in_pool)) ava_volumes.sort(key=lambda v: float(v.size)) LOG.info("available volumes in pool: %s ", str(ava_volumes)) for vol in ava_volumes: vol_flag = False migrate_pool = self.migrate_fit(vol, threshold) if migrate_pool: migrate_dicts[vol.id] = migrate_pool vol_flag = True else: target_type = self.retype_fit(vol, threshold) if target_type: retype_dicts[vol.id] = target_type vol_flag = True if vol_flag: used_cap -= float(vol.size) if used_cap < threshold * total_cap: seek_flag = False break if seek_flag: noboot_volumes = list( filter(lambda v: v.bootable.lower() == 'false' and v.status == 'in-use', volumes_in_pool)) noboot_volumes.sort(key=lambda v: float(v.size)) LOG.info("noboot volumes: %s ", str(noboot_volumes)) for vol in noboot_volumes: vol_flag = False migrate_pool = self.migrate_fit(vol, threshold) if migrate_pool: migrate_dicts[vol.id] = migrate_pool vol_flag = True else: target_type = self.retype_fit(vol, threshold) if target_type: retype_dicts[vol.id] = target_type vol_flag = True if vol_flag: used_cap -= float(vol.size) if used_cap < threshold * total_cap: seek_flag = False break if seek_flag: boot_volumes = list( filter(lambda v: v.bootable.lower() == 'true' and v.status == 'in-use', volumes_in_pool) ) boot_volumes.sort(key=lambda v: float(v.size)) LOG.info("boot volumes: %s ", str(boot_volumes)) for vol in boot_volumes: vol_flag = False migrate_pool = self.migrate_fit(vol, threshold) if migrate_pool: migrate_dicts[vol.id] = migrate_pool vol_flag = True else: target_type = self.retype_fit(vol, threshold) if target_type: retype_dicts[vol.id] = target_type vol_flag = True if vol_flag: used_cap -= float(vol.size) if used_cap < threshold * total_cap: seek_flag = False 
break return retype_dicts, migrate_dicts def pre_execute(self): LOG.info("Initializing " + self.get_display_name() + " Strategy") self.volume_threshold = self.input_parameters.volume_threshold def do_execute(self, audit=None): """Strategy execution phase This phase is where you should put the main logic of your strategy. """ all_pools = self.get_pools(self.cinder) all_volumes = self.get_volumes(self.cinder) threshold = float(self.volume_threshold) / 100 self.source_pools, self.dest_pools = self.group_pools( all_pools, threshold) LOG.info(" source pools: %s dest pools:%s", self.source_pools, self.dest_pools) if not self.source_pools: LOG.info("No pools require optimization") return if not self.dest_pools: LOG.info("No enough pools for optimization") return for source_pool in self.source_pools: retype_actions, migrate_actions = self.get_actions( source_pool, all_volumes, threshold) for vol_id, pool_type in retype_actions.items(): vol = [v for v in all_volumes if v.id == vol_id] parameters = {'migration_type': 'retype', 'destination_type': pool_type, 'resource_name': vol[0].name} self.solution.add_action(action_type='volume_migrate', resource_id=vol_id, input_parameters=parameters) for vol_id, pool_name in migrate_actions.items(): vol = [v for v in all_volumes if v.id == vol_id] parameters = {'migration_type': 'migrate', 'destination_node': pool_name, 'resource_name': vol[0].name} self.solution.add_action(action_type='volume_migrate', resource_id=vol_id, input_parameters=parameters) def post_execute(self): """Post-execution phase """ self.solution.set_efficacy_indicators( instance_migrations_count=0, instances_count=0, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/uniform_airflow.py0000664000175000017500000003211700000000000031230 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class UniformAirflow(base.BaseStrategy): """[PoC]Uniform Airflow using live migration *Description* It is a migration strategy based on the airflow of physical servers. It generates solutions to move VM whenever a server's airflow is higher than the specified threshold. *Requirements* * Hardware: compute node with NodeManager 3.0 support * Software: Ceilometer component ceilometer-agent-compute running in each compute node, and Ceilometer API can report such telemetry "airflow, system power, inlet temperature" successfully. * You must have at least 2 physical compute nodes to run this strategy *Limitations* - This is a proof of concept that is not meant to be used in production. - We cannot forecast how many servers should be migrated. 
This is the reason why we only plan a single virtual machine migration at a time. So it's better to use this algorithm with `CONTINUOUS` audits. - It assumes that live migrations are possible. """ # choose 300 seconds as the default duration of meter aggregation PERIOD = 300 DATASOURCE_METRICS = ['host_airflow', 'host_inlet_temp', 'host_power'] def __init__(self, config, osc=None): """Using live migration :param config: A mapping containing the configuration of this strategy :type config: dict :param osc: an OpenStackClients object """ super(UniformAirflow, self).__init__(config, osc) # The migration plan will be triggered when the airflow reaches # threshold self._period = self.PERIOD @classmethod def get_name(cls): return "uniform_airflow" @classmethod def get_display_name(cls): return _("Uniform airflow migration strategy") @classmethod def get_translatable_display_name(cls): return "Uniform airflow migration strategy" @classmethod def get_goal_name(cls): return "airflow_optimization" @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "threshold_airflow": { "description": ("airflow threshold for migration, Unit is " "0.1CFM"), "type": "number", "default": 400.0 }, "threshold_inlet_t": { "description": ("inlet temperature threshold for " "migration decision"), "type": "number", "default": 28.0 }, "threshold_power": { "description": ("system power threshold for migration " "decision"), "type": "number", "default": 350.0 }, "period": { "description": "aggregate time period of ceilometer", "type": "number", "default": 300 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, }, } def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def calculate_used_resource(self, node): """Compute the used vcpus, memory and disk based on instance flavors""" used_res = self.compute_model.get_node_used_resources(node) return used_res['vcpu'], used_res['memory'], used_res['disk'] def choose_instance_to_migrate(self, hosts): """Pick up an active instance to migrate from provided hosts :param hosts: the array of dict which contains node object """ instances_tobe_migrate = [] for nodemap in hosts: source_node = nodemap['node'] source_instances = self.compute_model.get_node_instances( source_node) if source_instances: inlet_temp = self.datasource_backend.statistic_aggregation( resource=source_node, resource_type='instance', meter_name='host_inlet_temp', period=self._period, granularity=self.granularity) power = self.datasource_backend.statistic_aggregation( resource=source_node, resource_type='instance', meter_name='host_power', period=self._period, granularity=self.granularity) if (power < self.threshold_power and inlet_temp < self.threshold_inlet_t): # hardware issue, migrate all instances from this node for instance in source_instances: instances_tobe_migrate.append(instance) return source_node, instances_tobe_migrate else: # migrate the first active instance for instance in source_instances: # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue if (instance.state != 
element.InstanceState.ACTIVE.value): LOG.info( "Instance not active, skipped: %s", instance.uuid) continue instances_tobe_migrate.append(instance) return source_node, instances_tobe_migrate else: LOG.info("Instance not found on node: %s", source_node.uuid) def filter_destination_hosts(self, hosts, instances_to_migrate): """Find instance and host with sufficient available resources""" # large instances go first instances_to_migrate = sorted( instances_to_migrate, reverse=True, key=lambda x: (x.vcpus)) # find hosts for instances destination_hosts = [] for instance_to_migrate in instances_to_migrate: required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_mem = instance_to_migrate.memory dest_migrate_info = {} for nodemap in hosts: host = nodemap['node'] if 'cores_used' not in nodemap: # calculate the available resources nodemap['cores_used'], nodemap['mem_used'], \ nodemap['disk_used'] = self.calculate_used_resource( host) cores_available = (host.vcpus - nodemap['cores_used']) disk_available = (host.disk - nodemap['disk_used']) mem_available = ( host.memory - nodemap['mem_used']) if (cores_available >= required_cores and disk_available >= required_disk and mem_available >= required_mem): dest_migrate_info['instance'] = instance_to_migrate dest_migrate_info['node'] = host nodemap['cores_used'] += required_cores nodemap['mem_used'] += required_mem nodemap['disk_used'] += required_disk destination_hosts.append(dest_migrate_info) break # check if all instances have target hosts if len(destination_hosts) != len(instances_to_migrate): LOG.warning("Not all target hosts could be found; it might " "be because there is not enough resource") return None return destination_hosts def group_hosts_by_airflow(self): """Group hosts based on airflow meters""" nodes = self.get_available_compute_nodes() overload_hosts = [] nonoverload_hosts = [] for node_id in nodes: airflow = None node = self.compute_model.get_node_by_uuid( node_id) airflow = self.datasource_backend.statistic_aggregation( resource=node, resource_type='compute_node', meter_name='host_airflow', period=self._period, granularity=self.granularity) # some hosts may not have airflow meter, remove from target if airflow is None: LOG.warning("%s: no airflow data", node.uuid) continue LOG.debug("%(resource)s: airflow %(airflow)f", {'resource': node, 'airflow': airflow}) nodemap = {'node': node, 'airflow': airflow} if airflow >= self.threshold_airflow: # mark the node to release resources overload_hosts.append(nodemap) else: nonoverload_hosts.append(nodemap) return overload_hosts, nonoverload_hosts def pre_execute(self): self._pre_execute() self.meter_name_airflow = 'host_airflow' self.meter_name_inlet_t = 'host_inlet_temp' self.meter_name_power = 'host_power' self.threshold_airflow = self.input_parameters.threshold_airflow self.threshold_inlet_t = self.input_parameters.threshold_inlet_t self.threshold_power = self.input_parameters.threshold_power self._period = self.input_parameters.period def do_execute(self, audit=None): source_nodes, target_nodes = self.group_hosts_by_airflow() if not source_nodes: LOG.debug("No hosts require optimization") return self.solution if not target_nodes: LOG.warning("No hosts currently have airflow under %s, " "therefore there are no possible target " "hosts for any migration", self.threshold_airflow) return self.solution # migrate the instance from server with largest airflow first source_nodes = sorted(source_nodes, reverse=True, key=lambda x: (x["airflow"])) instances_to_migrate = 
self.choose_instance_to_migrate(source_nodes) if not instances_to_migrate: return self.solution source_node, instances_src = instances_to_migrate # sort host with airflow target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"])) # find the hosts that have enough resource # for the instance to be migrated destination_hosts = self.filter_destination_hosts( target_nodes, instances_src) if not destination_hosts: LOG.warning("No target host could be found; it might " "be because there is not enough resources") return self.solution # generate solution to migrate the instance to the dest server, for info in destination_hosts: instance = info['instance'] destination_node = info['node'] if self.compute_model.migrate_instance( instance, source_node, destination_node): self.add_action_migrate( instance, 'live', source_node, destination_node) def post_execute(self): self.solution.model = self.compute_model # TODO(v-francoise): Add the indicators to the solution LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py0000664000175000017500000006506600000000000033310 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Authors: Vojtech CIMA # Bruno GRAZIOLI # Sean MURPHY # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections from oslo_log import log import oslo_utils from watcher._i18n import _ from watcher.applier.actions import migration from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): """VM Workload Consolidation Strategy A load consolidation strategy based on heuristic first-fit algorithm which focuses on measured CPU utilization and tries to minimize hosts which have too much or too little load respecting resource capacity constraints. This strategy produces a solution resulting in more efficient utilization of cluster resources using following four phases: * Offload phase - handling over-utilized resources * Consolidation phase - handling under-utilized resources * Solution optimization - reducing number of migrations * Disability of unused compute nodes A capacity coefficients (cc) might be used to adjust optimization thresholds. Different resources may require different coefficient values as well as setting up different coefficient values in both phases may lead to more efficient consolidation in the end. If the cc equals 1 the full resource capacity may be used, cc values lower than 1 will lead to resource under utilization and values higher than 1 will lead to resource overbooking. e.g. If targeted utilization is 80 percent of a compute node capacity, the coefficient in the consolidation phase will be 0.8, but may any lower value in the offloading phase. 
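    For example (illustrative): with cc['cpu'] = 0.8, a node with 40
    usable vCPUs is treated as full once 32 vCPU-equivalents of measured
    load are placed on it, while an offload coefficient of 0.7 would
    spread the load even thinner before consolidation starts.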
The lower it gets the cluster will appear more released (distributed) for the following consolidation phase. As this strategy leverages VM live migration to move the load from one compute node to another, this feature needs to be set up correctly on all compute nodes within the cluster. This strategy assumes it is possible to live migrate any VM from an active compute node to any other active compute node. """ AGGREGATE = 'mean' DATASOURCE_METRICS = ['instance_ram_allocated', 'instance_cpu_usage', 'instance_ram_usage', 'instance_root_disk_size', 'host_cpu_usage', 'host_ram_usage'] MIGRATION = "migrate" CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" def __init__(self, config, osc=None): super(VMWorkloadConsolidation, self).__init__(config, osc) self.number_of_migrations = 0 self.number_of_released_nodes = 0 self.datasource_instance_data_cache = dict() self.datasource_node_data_cache = dict() # Host metric adjustments that take into account planned # migrations. self.host_metric_delta = collections.defaultdict( lambda: collections.defaultdict(int)) @classmethod def get_name(cls): return "vm_workload_consolidation" @classmethod def get_display_name(cls): return _("VM Workload Consolidation Strategy") @classmethod def get_translatable_display_name(cls): return "VM Workload Consolidation Strategy" @property def period(self): return self.input_parameters.get('period', 3600) @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "period": { "description": "The time interval in seconds for " "getting statistic aggregation", "type": "number", "default": 3600 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, } } def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] nodes = self.compute_model.get_all_compute_nodes().items() return {uuid: cn for uuid, cn in nodes if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def get_instance_state_str(self, instance): """Get instance state in string format. :param instance: """ if isinstance(instance.state, str): return instance.state elif isinstance(instance.state, element.InstanceState): return instance.state.value else: LOG.error('Unexpected instance state type, ' 'state=%(state)s, state_type=%(st)s.', dict(state=instance.state, st=type(instance.state))) raise exception.WatcherException def get_node_status_str(self, node): """Get node status in string format. :param node: """ if isinstance(node.status, str): return node.status elif isinstance(node.status, element.ServiceState): return node.status.value else: LOG.error('Unexpected node status type, ' 'status=%(status)s, status_type=%(st)s.', dict(status=node.status, st=type(node.status))) raise exception.WatcherException def add_action_enable_compute_node(self, node): """Add an action for node enabler into the solution. :param node: node object :return: None """ params = {'state': element.ServiceState.ENABLED.value, 'resource_name': node.hostname} self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=params) self.number_of_released_nodes -= 1 def add_action_disable_node(self, node): """Add an action for node disability into the solution. 
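        (Illustrative note: the generated change_nova_service_state action
        also carries REASON_FOR_DISABLE as the service's disabled reason,
        so Watcher-initiated disables can be told apart from an
        operator's.)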
:param node: node object :return: None """ params = {'state': element.ServiceState.DISABLED.value, 'disabled_reason': self.REASON_FOR_DISABLE, 'resource_name': node.hostname} self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=params) self.number_of_released_nodes += 1 def add_migration(self, instance, source_node, destination_node): """Add an action for VM migration into the solution. :param instance: instance object :param source_node: node object :param destination_node: node object :return: None """ instance_state_str = self.get_instance_state_str(instance) if instance_state_str in (element.InstanceState.ACTIVE.value, element.InstanceState.PAUSED.value): migration_type = migration.Migrate.LIVE_MIGRATION elif instance_state_str == element.InstanceState.STOPPED.value: migration_type = migration.Migrate.COLD_MIGRATION else: LOG.error( 'Cannot live migrate: instance_uuid=%(instance_uuid)s, ' 'state=%(instance_state)s.', dict( instance_uuid=instance.uuid, instance_state=instance_state_str)) return # Here will makes repeated actions to enable the same compute node, # when migrating VMs to the destination node which is disabled. # Whether should we remove the same actions in the solution??? destination_node_status_str = self.get_node_status_str( destination_node) if destination_node_status_str == element.ServiceState.DISABLED.value: self.add_action_enable_compute_node(destination_node) if self.compute_model.migrate_instance( instance, source_node, destination_node): self.add_action_migrate( instance, migration_type, source_node, destination_node) self.number_of_migrations += 1 instance_util = self.get_instance_utilization(instance) self.host_metric_delta[source_node.hostname]['cpu'] -= ( instance_util['cpu']) # We'll deduce the vm allocated memory. self.host_metric_delta[source_node.hostname]['ram'] -= ( instance.memory) self.host_metric_delta[destination_node.hostname]['cpu'] += ( instance_util['cpu']) self.host_metric_delta[destination_node.hostname]['ram'] += ( instance.memory) def disable_unused_nodes(self): """Generate actions for disabling unused nodes. :return: None """ for node in self.get_available_compute_nodes().values(): if (len(self.compute_model.get_node_instances(node)) == 0 and node.status != element.ServiceState.DISABLED.value): self.add_action_disable_node(node) def get_instance_utilization(self, instance): """Collect cpu, ram and disk utilization statistics of a VM. 
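        Results are cached per instance UUID for the duration of the
        audit. When the datasource returns no value, the flavor's vcpus,
        ram and disk sizes are used as a conservative fallback and a
        warning is logged.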
:param instance: instance object :param aggr: string :return: dict(cpu(number of vcpus used), ram(MB used), disk(B used)) """ instance_cpu_util = None instance_ram_util = None instance_disk_util = None if instance.uuid in self.datasource_instance_data_cache.keys(): return self.datasource_instance_data_cache.get(instance.uuid) instance_cpu_util = self.datasource_backend.get_instance_cpu_usage( resource=instance, period=self.period, aggregate=self.AGGREGATE, granularity=self.granularity) instance_ram_util = self.datasource_backend.get_instance_ram_usage( resource=instance, period=self.period, aggregate=self.AGGREGATE, granularity=self.granularity) if not instance_ram_util: instance_ram_util = ( self.datasource_backend.get_instance_ram_allocated( resource=instance, period=self.period, aggregate=self.AGGREGATE, granularity=self.granularity)) instance_disk_util = ( self.datasource_backend.get_instance_root_disk_size( resource=instance, period=self.period, aggregate=self.AGGREGATE, granularity=self.granularity)) if instance_cpu_util: total_cpu_utilization = ( instance.vcpus * (instance_cpu_util / 100.0)) else: total_cpu_utilization = instance.vcpus if not instance_ram_util: instance_ram_util = instance.memory LOG.warning('No values returned by %s for memory.resident, ' 'use instance flavor ram value', instance.uuid) if not instance_disk_util: instance_disk_util = instance.disk LOG.warning('No values returned by %s for disk.root.size, ' 'use instance flavor disk value', instance.uuid) self.datasource_instance_data_cache[instance.uuid] = dict( cpu=total_cpu_utilization, ram=instance_ram_util, disk=instance_disk_util) return self.datasource_instance_data_cache.get(instance.uuid) def _get_node_total_utilization(self, node): if node.hostname in self.datasource_node_data_cache: return self.datasource_node_data_cache[node.hostname] cpu = self.datasource_backend.get_host_cpu_usage( node, self.period, self.AGGREGATE, self.granularity) ram = self.datasource_backend.get_host_ram_usage( node, self.period, self.AGGREGATE, self.granularity) self.datasource_node_data_cache[node.hostname] = dict( cpu=cpu, ram=ram) return self.datasource_node_data_cache[node.hostname] def get_node_utilization(self, node): """Collect cpu, ram and disk utilization statistics of a node. :param node: node object :param aggr: string :return: dict(cpu(number of cores used), ram(MB used), disk(B used)) """ node_instances = self.compute_model.get_node_instances(node) node_ram_util = 0 node_disk_util = 0 node_cpu_util = 0 for instance in node_instances: instance_util = self.get_instance_utilization( instance) node_cpu_util += instance_util['cpu'] node_ram_util += instance_util['ram'] node_disk_util += instance_util['disk'] LOG.debug("instance utilization: %s %s", instance, instance_util) total_node_util = self._get_node_total_utilization(node) total_node_cpu_util = total_node_util['cpu'] or 0 if total_node_cpu_util: total_node_cpu_util = total_node_cpu_util * node.vcpus / 100 # account for planned migrations total_node_cpu_util += self.host_metric_delta[node.hostname]['cpu'] total_node_ram_util = total_node_util['ram'] or 0 if total_node_ram_util: total_node_ram_util /= oslo_utils.units.Ki total_node_ram_util += self.host_metric_delta[node.hostname]['ram'] LOG.debug( "node utilization: %s. 
" "total instance cpu: %s, " "total instance ram: %s, " "total instance disk: %s, " "total host cpu: %s, " "total host ram: %s, " "node delta usage: %s.", node, node_cpu_util, node_ram_util, node_disk_util, total_node_cpu_util, total_node_ram_util, self.host_metric_delta[node.hostname]) return dict(cpu=max(node_cpu_util, total_node_cpu_util), ram=max(node_ram_util, total_node_ram_util), disk=node_disk_util) def get_node_capacity(self, node): """Collect cpu, ram and disk capacity of a node. :param node: node object :return: dict(cpu(cores), ram(MB), disk(B)) """ return dict(cpu=node.vcpu_capacity, ram=node.memory_mb_capacity, disk=node.disk_gb_capacity) def get_relative_node_utilization(self, node): """Return relative node utilization. :param node: node object :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} """ relative_node_utilization = {} util = self.get_node_utilization(node) cap = self.get_node_capacity(node) for k in util.keys(): relative_node_utilization[k] = float(util[k]) / float(cap[k]) return relative_node_utilization def get_relative_cluster_utilization(self): """Calculate relative cluster utilization (rcu). RCU is an average of relative utilizations (rhu) of active nodes. :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} """ nodes = self.get_available_compute_nodes().values() rcu = {} counters = {} for node in nodes: node_status_str = self.get_node_status_str(node) if node_status_str == element.ServiceState.ENABLED.value: rhu = self.get_relative_node_utilization(node) for k in rhu.keys(): if k not in rcu: rcu[k] = 0 if k not in counters: counters[k] = 0 rcu[k] += rhu[k] counters[k] += 1 for k in rcu.keys(): rcu[k] /= counters[k] return rcu def is_overloaded(self, node, cc): """Indicate whether a node is overloaded. This considers provided resource capacity coefficients (cc). :param node: node object :param cc: dictionary containing resource capacity coefficients :return: [True, False] """ node_capacity = self.get_node_capacity(node) node_utilization = self.get_node_utilization( node) metrics = ['cpu'] for m in metrics: if node_utilization[m] > node_capacity[m] * cc[m]: return True return False def instance_fits(self, instance, node, cc): """Indicate whether is a node able to accommodate a VM. This considers provided resource capacity coefficients (cc). :param instance: :py:class:`~.element.Instance` :param node: node object :param cc: dictionary containing resource capacity coefficients :return: [True, False] """ node_capacity = self.get_node_capacity(node) node_utilization = self.get_node_utilization(node) instance_utilization = self.get_instance_utilization(instance) metrics = ['cpu', 'ram', 'disk'] for m in metrics: fits = (instance_utilization[m] + node_utilization[m] <= node_capacity[m] * cc[m]) LOG.debug( "Instance fits: %s, metric: %s, instance: %s, " "node: %s, instance utilization: %s, " "node utilization: %s, node capacity: %s, cc: %s", fits, m, instance, node, instance_utilization[m], node_utilization[m], node_capacity[m], cc[m]) if not fits: return False return True def optimize_solution(self): """Optimize solution. This is done by eliminating unnecessary or circular set of migrations which can be replaced by a more efficient solution. e.g.: * A->B, B->C => replace migrations A->B, B->C with a single migration A->C as both solution result in VM running on node C which can be achieved with one migration instead of two. * A->B, B->A => remove A->B and B->A as they do not result in a new VM placement. 
""" migrate_actions = ( a for a in self.solution.actions if a[ 'action_type'] == self.MIGRATION) instance_to_be_migrated = ( a['input_parameters']['resource_id'] for a in migrate_actions) instance_uuids = list(set(instance_to_be_migrated)) for instance_uuid in instance_uuids: actions = list( a for a in self.solution.actions if a[ 'input_parameters'][ 'resource_id'] == instance_uuid) if len(actions) > 1: src_name = actions[0]['input_parameters']['source_node'] dst_name = actions[-1]['input_parameters']['destination_node'] for a in actions: self.solution.actions.remove(a) self.number_of_migrations -= 1 LOG.info("Optimized migrations: %s. " "Source: %s, destination: %s", actions, src_name, dst_name) src_node = self.compute_model.get_node_by_name(src_name) dst_node = self.compute_model.get_node_by_name(dst_name) instance = self.compute_model.get_instance_by_uuid( instance_uuid) if self.compute_model.migrate_instance( instance, dst_node, src_node): self.add_migration(instance, src_node, dst_node) def offload_phase(self, cc): """Perform offloading phase. This considers provided resource capacity coefficients. Offload phase performing first-fit based bin packing to offload overloaded nodes. This is done in a fashion of moving the least CPU utilized VM first as live migration these generally causes less troubles. This phase results in a cluster with no overloaded nodes. * This phase is be able to enable disabled nodes (if needed and any available) in the case of the resource capacity provided by active nodes is not able to accommodate all the load. As the offload phase is later followed by the consolidation phase, the node enabler in this phase doesn't necessarily results in more enabled nodes in the final solution. :param cc: dictionary containing resource capacity coefficients """ sorted_nodes = sorted( self.get_available_compute_nodes().values(), key=lambda x: self.get_node_utilization(x)['cpu']) for node in reversed(sorted_nodes): if self.is_overloaded(node, cc): for instance in sorted( self.compute_model.get_node_instances(node), key=lambda x: self.get_instance_utilization( x)['cpu'] ): LOG.info("Node %s overloaded, attempting to reduce load.", node) # skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue for destination_node in reversed(sorted_nodes): if self.instance_fits( instance, destination_node, cc): LOG.info("Offload: found fitting " "destination (%s) for instance: %s. " "Planning migration.", destination_node, instance.uuid) self.add_migration(instance, node, destination_node) break if not self.is_overloaded(node, cc): LOG.info("Node %s no longer overloaded.", node) break else: LOG.info("Node still overloaded (%s), " "continuing offload phase.", node) def consolidation_phase(self, cc): """Perform consolidation phase. This considers provided resource capacity coefficients. Consolidation phase performing first-fit based bin packing. First, nodes with the lowest cpu utilization are consolidated by moving their load to nodes with the highest cpu utilization which can accommodate the load. In this phase the most cpu utilized VMs are prioritized as their load is more difficult to accommodate in the system than less cpu utilized VMs which can be later used to fill smaller CPU capacity gaps. 
:param cc: dictionary containing resource capacity coefficients """ sorted_nodes = sorted( self.get_available_compute_nodes().values(), key=lambda x: self.get_node_utilization(x)['cpu']) asc = 0 for node in sorted_nodes: instances = sorted( self.compute_model.get_node_instances(node), key=lambda x: self.get_instance_utilization(x)['cpu']) for instance in reversed(instances): # skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue dsc = len(sorted_nodes) - 1 for destination_node in reversed(sorted_nodes): if asc >= dsc: break if self.instance_fits( instance, destination_node, cc): LOG.info("Consolidation: found fitting " "destination (%s) for instance: %s. " "Planning migration.", destination_node, instance.uuid) self.add_migration(instance, node, destination_node) break dsc -= 1 asc += 1 def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): """Execute strategy. This strategy produces a solution resulting in more efficient utilization of cluster resources using following four phases: * Offload phase - handling over-utilized resources * Consolidation phase - handling under-utilized resources * Solution optimization - reducing number of migrations * Disability of unused nodes :param original_model: root_model object """ LOG.info('Executing Smart Strategy') rcu = self.get_relative_cluster_utilization() cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} # Offloading phase self.offload_phase(cc) # Consolidation phase self.consolidation_phase(cc) # Optimize solution self.optimize_solution() # disable unused nodes self.disable_unused_nodes() rcu_after = self.get_relative_cluster_utilization() info = { "compute_nodes_count": len( self.get_available_compute_nodes()), 'number_of_migrations': self.number_of_migrations, 'number_of_released_nodes': self.number_of_released_nodes, 'relative_cluster_utilization_before': str(rcu), 'relative_cluster_utilization_after': str(rcu_after) } LOG.debug(info) def post_execute(self): self.solution.set_efficacy_indicators( compute_nodes_count=len( self.get_available_compute_nodes()), released_compute_nodes_count=self.number_of_released_nodes, instance_migrations_count=self.number_of_migrations, ) LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/workload_balance.py0000664000175000017500000003377500000000000031330 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
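#
# A minimal usage sketch for the strategy defined in this module,
# assuming a deployed Watcher with the python-watcherclient OSC plugin
# (the template and audit names below are illustrative):
#
#     openstack optimize audittemplate create at_balance \
#         workload_balancing --strategy workload_balance
#     openstack optimize audit create -a at_balance \
#         -p threshold=30.0 -p metrics=instance_cpu_usage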
# from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): """[PoC]Workload balance using live migration *Description* It is a migration strategy based on the VM workload of physical servers. It generates solutions to move a workload whenever a server's CPU or RAM utilization % is higher than the specified threshold. The VM to be moved should make the host close to average workload of all compute nodes. *Requirements* * Hardware: compute node should use the same physical CPUs/RAMs * Software: Ceilometer component ceilometer-agent-compute running in each compute node, and Ceilometer API can report such telemetry "instance_cpu_usage" and "instance_ram_usage" successfully. * You must have at least 2 physical compute nodes to run this strategy. *Limitations* - This is a proof of concept that is not meant to be used in production - We cannot forecast how many servers should be migrated. This is the reason why we only plan a single virtual machine migration at a time. So it's better to use this algorithm with `CONTINUOUS` audits. - It assume that live migrations are possible """ # The meter to report CPU utilization % of VM in ceilometer # Unit: %, value range is [0 , 100] # The meter to report memory resident of VM in ceilometer # Unit: MB DATASOURCE_METRICS = ['instance_cpu_usage', 'instance_ram_usage'] def __init__(self, config, osc=None): """Workload balance using live migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(WorkloadBalance, self).__init__(config, osc) # the migration plan will be triggered when the CPU or RAM # utilization % reaches threshold self._meter = None self.instance_migrations_count = 0 @classmethod def get_name(cls): return "workload_balance" @classmethod def get_display_name(cls): return _("Workload Balance Migration Strategy") @classmethod def get_translatable_display_name(cls): return "Workload Balance Migration Strategy" @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "metrics": { "description": "Workload balance based on metrics: " "cpu or ram utilization", "type": "string", "choice": ["instance_cpu_usage", "instance_ram_usage"], "default": "instance_cpu_usage" }, "threshold": { "description": "workload threshold for migration", "type": "number", "default": 25.0 }, "period": { "description": "aggregate time period of ceilometer", "type": "number", "default": 300 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, }, } def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache): """Pick up an active instance to migrate from provided hosts :param hosts: the array of dict which contains node object :param avg_workload: the average workload value of all nodes :param workload_cache: the map 
contains instance to workload mapping """ for instance_data in hosts: source_node = instance_data['compute_node'] source_instances = self.compute_model.get_node_instances( source_node) if source_instances: delta_workload = instance_data['workload'] - avg_workload min_delta = 1000000 instance_id = None for instance in source_instances: try: # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue # select the first active VM to migrate if (instance.state != element.InstanceState.ACTIVE.value): LOG.debug("Instance not active, skipped: %s", instance.uuid) continue current_delta = ( delta_workload - workload_cache[instance.uuid]) if 0 <= current_delta < min_delta: min_delta = current_delta instance_id = instance.uuid except exception.InstanceNotFound: LOG.error("Instance not found; error: %s", instance_id) if instance_id: return (source_node, self.compute_model.get_instance_by_uuid( instance_id)) else: LOG.info("VM not found from compute_node: %s", source_node.uuid) def filter_destination_hosts(self, hosts, instance_to_migrate, avg_workload, workload_cache): """Only return hosts with sufficient available resources""" required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_mem = instance_to_migrate.memory # filter nodes without enough resource destination_hosts = [] src_instance_workload = workload_cache[instance_to_migrate.uuid] for instance_data in hosts: host = instance_data['compute_node'] workload = instance_data['workload'] # calculate the available resources free_res = self.compute_model.get_node_free_resources(host) if (free_res['vcpu'] >= required_cores and free_res['memory'] >= required_mem and free_res['disk'] >= required_disk): if (self._meter == 'instance_cpu_usage' and ((src_instance_workload + workload) < self.threshold / 100 * host.vcpus)): destination_hosts.append(instance_data) if (self._meter == 'instance_ram_usage' and ((src_instance_workload + workload) < self.threshold / 100 * host.memory)): destination_hosts.append(instance_data) return destination_hosts def group_hosts_by_cpu_or_ram_util(self): """Calculate the workloads of each compute_node try to find out the nodes which have reached threshold and the nodes which are under threshold. and also calculate the average workload value of all nodes. and also generate the instance workload map. 
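        For example (illustrative): with metrics=instance_cpu_usage, a VM
        reporting 50% CPU on 4 vCPUs contributes 2.0 vCPU-equivalents of
        workload to its node; the node's utilization is then
        node_workload / vcpus * 100.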
""" nodes = self.get_available_compute_nodes() cluster_size = len(nodes) overload_hosts = [] nonoverload_hosts = [] # total workload of cluster cluster_workload = 0.0 # use workload_cache to store the workload of VMs for reuse purpose workload_cache = {} for node_id in nodes: node = self.compute_model.get_node_by_uuid(node_id) instances = self.compute_model.get_node_instances(node) node_workload = 0.0 for instance in instances: util = None try: util = self.datasource_backend.statistic_aggregation( instance, 'instance', self._meter, self._period, 'mean', self._granularity) except Exception as exc: LOG.exception(exc) LOG.error("Can not get %s from %s", self._meter, self.datasource_backend.NAME) continue if util is None: LOG.debug("Instance (%s): %s is None", instance.uuid, self._meter) continue if self._meter == 'instance_cpu_usage': workload_cache[instance.uuid] = (util * instance.vcpus / 100) else: workload_cache[instance.uuid] = util node_workload += workload_cache[instance.uuid] LOG.debug("VM (%s): %s %f", instance.uuid, self._meter, util) cluster_workload += node_workload if self._meter == 'instance_cpu_usage': node_util = node_workload / node.vcpus * 100 else: node_util = node_workload / node.memory * 100 instance_data = { 'compute_node': node, self._meter: node_util, 'workload': node_workload} if node_util >= self.threshold: # mark the node to release resources overload_hosts.append(instance_data) else: nonoverload_hosts.append(instance_data) avg_workload = 0 if cluster_size != 0: avg_workload = cluster_workload / cluster_size return overload_hosts, nonoverload_hosts, avg_workload, workload_cache def pre_execute(self): self._pre_execute() self.threshold = self.input_parameters.threshold self._period = self.input_parameters.period self._meter = self.input_parameters.metrics self._granularity = self.input_parameters.granularity def do_execute(self, audit=None): """Strategy execution phase This phase is where you should put the main logic of your strategy. """ source_nodes, target_nodes, avg_workload, workload_cache = ( self.group_hosts_by_cpu_or_ram_util()) if not source_nodes: LOG.debug("No hosts require optimization") return self.solution if not target_nodes: LOG.warning("No hosts current have CPU utilization under %s " "percent, therefore there are no possible target " "hosts for any migration", self.threshold) return self.solution # choose the server with largest cpu usage source_nodes = sorted(source_nodes, reverse=True, key=lambda x: (x[self._meter])) instance_to_migrate = self.choose_instance_to_migrate( source_nodes, avg_workload, workload_cache) if not instance_to_migrate: return self.solution source_node, instance_src = instance_to_migrate # find the hosts that have enough resource for the VM to be migrated destination_hosts = self.filter_destination_hosts( target_nodes, instance_src, avg_workload, workload_cache) # sort the filtered result by workload # pick up the lowest one as dest server if not destination_hosts: # for instance. 
LOG.warning("No proper target host could be found, it might " "be because of there's no enough CPU/Memory/DISK") return self.solution destination_hosts = sorted(destination_hosts, key=lambda x: (x[self._meter])) # always use the host with lowerest CPU utilization mig_destination_node = destination_hosts[0]['compute_node'] # generate solution to migrate the instance to the dest server, if self.compute_model.migrate_instance( instance_src, source_node, mig_destination_node): self.add_action_migrate( instance_src, 'live', source_node, mig_destination_node) self.instance_migrations_count += 1 def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.solution.model = self.compute_model self.solution.set_efficacy_indicators( instance_migrations_count=self.instance_migrations_count, instances_count=len(self.compute_model.get_all_instances()) ) LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/workload_stabilization.py0000664000175000017500000006050600000000000032607 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica LLC # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import itertools import math import random import oslo_cache from oslo_config import cfg from oslo_log import log import oslo_utils from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) CONF = cfg.CONF def _set_memoize(conf): oslo_cache.configure(conf) region = oslo_cache.create_region() configured_region = oslo_cache.configure_cache_region(conf, region) return oslo_cache.core.get_memoization_decorator(conf, configured_region, 'cache') class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy): """Workload Stabilization control using live migration This is workload stabilization strategy based on standard deviation algorithm. The goal is to determine if there is an overload in a cluster and respond to it by migrating VMs to stabilize the cluster. This strategy has been tested in a small (32 nodes) cluster. It assumes that live migrations are possible in your cluster. 
""" MEMOIZE = _set_memoize(CONF) DATASOURCE_METRICS = ['host_cpu_usage', 'instance_cpu_usage', 'instance_ram_usage', 'host_ram_usage'] def __init__(self, config, osc=None): """Workload Stabilization control using live migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(WorkloadStabilization, self).__init__(config, osc) self.weights = None self.metrics = None self.thresholds = None self.host_choice = None self.instance_metrics = None self.retry_count = None self.periods = None self.aggregation_method = None self.sd_before_audit = 0 self.sd_after_audit = 0 self.instance_migrations_count = 0 self.instances_count = 0 @classmethod def get_name(cls): return "workload_stabilization" @classmethod def get_display_name(cls): return _("Workload stabilization") @classmethod def get_translatable_display_name(cls): return "Workload stabilization" @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): return { "properties": { "metrics": { "description": "Metrics used as rates of cluster loads.", "type": "array", "items": { "type": "string", "enum": ["instance_cpu_usage", "instance_ram_usage"] }, "default": ["instance_cpu_usage"] }, "thresholds": { "description": "Dict where key is a metric and value " "is a trigger value.", "type": "object", "properties": { "instance_cpu_usage": { "type": "number", "minimum": 0, "maximum": 1 }, "instance_ram_usage": { "type": "number", "minimum": 0, "maximum": 1 } }, "default": {"instance_cpu_usage": 0.1, "instance_ram_usage": 0.1} }, "weights": { "description": "These weights used to calculate " "common standard deviation. Name of weight" " contains meter name and _weight suffix.", "type": "object", "properties": { "instance_cpu_usage_weight": { "type": "number", "minimum": 0, "maximum": 1 }, "instance_ram_usage_weight": { "type": "number", "minimum": 0, "maximum": 1 } }, "default": {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0} }, "instance_metrics": { "description": "Mapping to get hardware statistics using" " instance metrics", "type": "object", "default": {"instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"} }, "host_choice": { "description": "Method of host's choice. There are cycle," " retry and fullsearch methods. " "Cycle will iterate hosts in cycle. " "Retry will get some hosts random " "(count defined in retry_count option). " "Fullsearch will return each host " "from list.", "type": "string", "default": "retry" }, "retry_count": { "description": "Count of random returned hosts", "type": "number", "minimum": 1, "default": 1 }, "periods": { "description": "These periods are used to get statistic " "aggregation for instance and host " "metrics. The period is simply a repeating" " interval of time into which the samples" " are grouped for aggregation. Watcher " "uses only the last period of all received" " ones.", "type": "object", "properties": { "instance": { "type": "integer", "minimum": 0 }, "compute_node": { "type": "integer", "minimum": 0 }, "node": { "type": "integer", # node is deprecated "minimum": 0, "default": 0 }, }, "default": { "instance": 720, "compute_node": 600, # node is deprecated "node": 0, } }, "aggregation_method": { "description": "Function used to aggregate multiple " "measures into an aggregate. 
For example, " "the min aggregation method will aggregate " "the values of different measures to the " "minimum value of all the measures in the " "time range.", "type": "object", "properties": { "instance": { "type": "string", "default": 'mean' }, "compute_node": { "type": "string", "default": 'mean' }, # node is deprecated "node": { "type": "string", "default": '' }, }, "default": { "instance": 'mean', "compute_node": 'mean', # node is deprecated "node": '', } }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "minimum": 0, "default": 300 }, } } def transform_instance_cpu(self, instance_load, host_vcpus): """Transform instance cpu utilization to overall host cpu utilization. :param instance_load: dict that contains instance uuid and utilization info. :param host_vcpus: int :return: float value """ return (instance_load['instance_cpu_usage'] * (instance_load['vcpus'] / float(host_vcpus))) @MEMOIZE def get_instance_load(self, instance): """Gathering instance load through ceilometer/gnocchi statistic. :param instance: instance for which statistic is gathered. :return: dict """ LOG.debug('Getting load for %s', instance.uuid) instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus} for meter in self.metrics: avg_meter = self.datasource_backend.statistic_aggregation( instance, 'instance', meter, self.periods['instance'], self.aggregation_method['instance'], self.granularity) if avg_meter is None: LOG.warning( "No values returned by %(resource_id)s " "for %(metric_name)s", dict( resource_id=instance.uuid, metric_name=meter)) return if meter == 'instance_cpu_usage': avg_meter /= float(100) LOG.debug('Load of %(metric)s for %(instance)s is %(value)s', {'metric': meter, 'instance': instance.uuid, 'value': avg_meter}) instance_load[meter] = avg_meter return instance_load def normalize_hosts_load(self, hosts): normalized_hosts = copy.deepcopy(hosts) for host in normalized_hosts: if 'instance_ram_usage' in normalized_hosts[host]: node = self.compute_model.get_node_by_uuid(host) normalized_hosts[host]['instance_ram_usage'] \ /= float(node.memory) return normalized_hosts def get_available_nodes(self): nodes = self.compute_model.get_all_compute_nodes().items() return {node_uuid: node for node_uuid, node in nodes if node.state == element.ServiceState.ONLINE.value and node.status == element.ServiceState.ENABLED.value} def get_hosts_load(self): """Get load of every available host by gathering instances load""" hosts_load = {} for node_id, node in self.get_available_nodes().items(): hosts_load[node_id] = {} hosts_load[node_id]['vcpus'] = node.vcpus LOG.debug('Getting load for %s', node_id) for metric in self.metrics: avg_meter = None meter_name = self.instance_metrics[metric] avg_meter = self.datasource_backend.statistic_aggregation( node, 'compute_node', self.instance_metrics[metric], self.periods['compute_node'], self.aggregation_method['compute_node'], self.granularity) if avg_meter is None: LOG.warning('No values returned by node %s for %s', node_id, meter_name) del hosts_load[node_id] break else: if meter_name == 'host_ram_usage': avg_meter /= oslo_utils.units.Ki if meter_name == 'host_cpu_usage': avg_meter /= 100 LOG.debug('Load of %(metric)s for %(node)s is %(value)s', {'metric': metric, 'node': node_id, 'value': avg_meter}) hosts_load[node_id][metric] = avg_meter return hosts_load def get_sd(self, hosts, meter_name): """Get standard deviation among hosts by specified meter""" mean = 0 variation = 0 num_hosts = 
len(hosts) if num_hosts == 0: return 0 for host_id in hosts: mean += hosts[host_id][meter_name] mean /= num_hosts for host_id in hosts: variation += (hosts[host_id][meter_name] - mean) ** 2 variation /= num_hosts sd = math.sqrt(variation) return sd def calculate_weighted_sd(self, sd_case): """Calculate common standard deviation among meters on host""" weighted_sd = 0 for metric, value in zip(self.metrics, sd_case): try: weighted_sd += value * float(self.weights[metric + '_weight']) except KeyError as exc: LOG.exception(exc) raise exception.WatcherException( _("Incorrect mapping: could not find associated weight" " for %s in weight dict.") % metric) return weighted_sd def calculate_migration_case(self, hosts, instance, src_node, dst_node): """Calculate migration case Return list of standard deviation values, that appearing in case of migration of instance from source host to destination host :param hosts: hosts with their workload :param instance: the virtual machine :param src_node: the source node :param dst_node: the destination node :return: list of standard deviation values """ migration_case = [] new_hosts = copy.deepcopy(hosts) instance_load = self.get_instance_load(instance) if not instance_load: return s_host_vcpus = new_hosts[src_node.uuid]['vcpus'] d_host_vcpus = new_hosts[dst_node.uuid]['vcpus'] for metric in self.metrics: if metric == 'instance_cpu_usage': new_hosts[src_node.uuid][metric] -= ( self.transform_instance_cpu(instance_load, s_host_vcpus)) new_hosts[dst_node.uuid][metric] += ( self.transform_instance_cpu(instance_load, d_host_vcpus)) else: new_hosts[src_node.uuid][metric] -= instance_load[metric] new_hosts[dst_node.uuid][metric] += instance_load[metric] normalized_hosts = self.normalize_hosts_load(new_hosts) for metric in self.metrics: migration_case.append(self.get_sd(normalized_hosts, metric)) migration_case.append(new_hosts) return migration_case def get_current_weighted_sd(self, hosts_load): """Calculate current weighted sd""" current_sd = [] normalized_load = self.normalize_hosts_load(hosts_load) for metric in self.metrics: metric_sd = self.get_sd(normalized_load, metric) current_sd.append(metric_sd) current_sd.append(hosts_load) return self.calculate_weighted_sd(current_sd[:-1]) def simulate_migrations(self, hosts): """Make sorted list of pairs instance:dst_host""" def yield_nodes(nodes): if self.host_choice == 'cycle': for i in itertools.cycle(nodes): yield [i] if self.host_choice == 'retry': while True: yield random.sample(nodes, self.retry_count) if self.host_choice == 'fullsearch': while True: yield nodes instance_host_map = [] nodes = sorted(list(self.get_available_nodes())) current_weighted_sd = self.get_current_weighted_sd(hosts) for src_host in nodes: src_node = self.compute_model.get_node_by_uuid(src_host) c_nodes = copy.copy(nodes) c_nodes.remove(src_host) node_list = yield_nodes(c_nodes) for instance in self.compute_model.get_node_instances(src_node): # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue if instance.state not in [element.InstanceState.ACTIVE.value, element.InstanceState.PAUSED.value]: continue min_sd_case = {'value': current_weighted_sd} for dst_host in next(node_list): dst_node = self.compute_model.get_node_by_uuid(dst_host) sd_case = self.calculate_migration_case( hosts, instance, src_node, dst_node) if sd_case is None: break weighted_sd = self.calculate_weighted_sd(sd_case[:-1]) if weighted_sd < min_sd_case['value']: min_sd_case 
= { 'host': dst_node.uuid, 'value': weighted_sd, 's_host': src_node.uuid, 'instance': instance.uuid} instance_host_map.append(min_sd_case) if sd_case is None: continue return sorted(instance_host_map, key=lambda x: x['value']) def check_threshold(self): """Check if cluster is needed in balancing""" hosts_load = self.get_hosts_load() normalized_load = self.normalize_hosts_load(hosts_load) for metric in self.metrics: metric_sd = self.get_sd(normalized_load, metric) LOG.info("Standard deviation for %(metric)s is %(sd)s.", {'metric': metric, 'sd': metric_sd}) if metric_sd > float(self.thresholds[metric]): LOG.info("Standard deviation of %(metric)s exceeds" " appropriate threshold %(threshold)s by %(sd)s.", {'metric': metric, 'threshold': float(self.thresholds[metric]), 'sd': metric_sd}) LOG.info("Launching workload optimization...") self.sd_before_audit = metric_sd return self.simulate_migrations(hosts_load) def create_migration_instance(self, mig_instance, mig_source_node, mig_destination_node): """Create migration VM""" if self.compute_model.migrate_instance( mig_instance, mig_source_node, mig_destination_node): self.add_action_migrate(mig_instance, 'live', mig_source_node, mig_destination_node) self.instance_migrations_count += 1 def migrate(self, instance_uuid, src_host, dst_host): mig_instance = self.compute_model.get_instance_by_uuid(instance_uuid) mig_source_node = self.compute_model.get_node_by_uuid( src_host) mig_destination_node = self.compute_model.get_node_by_uuid( dst_host) self.create_migration_instance(mig_instance, mig_source_node, mig_destination_node) def fill_solution(self): self.solution.model = self.compute_model return self.solution def pre_execute(self): self._pre_execute() self.weights = self.input_parameters.weights self.metrics = self.input_parameters.metrics self.thresholds = self.input_parameters.thresholds self.host_choice = self.input_parameters.host_choice self.instance_metrics = self.input_parameters.instance_metrics self.retry_count = self.input_parameters.retry_count self.periods = self.input_parameters.periods self.aggregation_method = self.input_parameters.aggregation_method # backwards compatibility for node parameter with aggregate. if self.aggregation_method['node']: LOG.warning('Parameter node has been renamed to compute_node and ' 'will be removed in next release.') self.aggregation_method['compute_node'] = \ self.aggregation_method['node'] # backwards compatibility for node parameter with period. 
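        # e.g. (illustrative) a legacy input of periods={'node': 600} is
        # mapped onto periods={'compute_node': 600} below, mirroring the
        # aggregation_method handling just above.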
if self.periods['node'] != 0: LOG.warning('Parameter node has been renamed to compute_node and ' 'will be removed in next release.') self.periods['compute_node'] = self.periods['node'] def do_execute(self, audit=None): migration = self.check_threshold() if migration: hosts_load = self.get_hosts_load() min_sd = 1 balanced = False for instance_host in migration: instance = self.compute_model.get_instance_by_uuid( instance_host['instance']) src_node = self.compute_model.get_node_by_uuid( instance_host['s_host']) dst_node = self.compute_model.get_node_by_uuid( instance_host['host']) if instance.disk > dst_node.disk: continue instance_load = self.calculate_migration_case( hosts_load, instance, src_node, dst_node) weighted_sd = self.calculate_weighted_sd(instance_load[:-1]) if weighted_sd < min_sd: min_sd = weighted_sd hosts_load = instance_load[-1] LOG.info("Migration of %(instance_uuid)s from %(s_host)s " "to %(host)s reduces standard deviation to " "%(min_sd)s.", {'instance_uuid': instance_host['instance'], 's_host': instance_host['s_host'], 'host': instance_host['host'], 'min_sd': min_sd}) self.migrate(instance_host['instance'], instance_host['s_host'], instance_host['host']) self.sd_after_audit = min_sd for metric, value in zip(self.metrics, instance_load[:-1]): if value < float(self.thresholds[metric]): LOG.info("At least one of metrics' values fell " "below the threshold values. " "Workload Stabilization has successfully " "completed optimization process.") balanced = True break if balanced: break def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.fill_solution() self.solution.set_efficacy_indicators( instance_migrations_count=self.instance_migrations_count, standard_deviation_before_audit=self.sd_before_audit, standard_deviation_after_audit=self.sd_after_audit, instances_count=len(self.compute_model.get_all_instances()), ) LOG.debug(self.compute_model.to_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/strategy/strategies/zone_migration.py0000664000175000017500000010065400000000000031054 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
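#
# A minimal input-parameter sketch for this strategy (values are
# illustrative; the JSON schema in get_schema() below is authoritative):
#
#     {
#         "compute_nodes": [{"src_node": "w012", "dst_node": "w022"}],
#         "storage_pools": [{"src_pool": "src1@back1#pool1",
#                            "dst_pool": "dst1@back1#pool1",
#                            "src_type": "src1_type",
#                            "dst_type": "dst1_type"}],
#         "parallel_total": 6,
#         "with_attached_volume": false
#     }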
# from dateutil.parser import parse from oslo_log import log from cinderclient.v3.volumes import Volume from novaclient.v2.servers import Server from watcher._i18n import _ from watcher.common import cinder_helper from watcher.common import nova_helper from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) INSTANCE = "instance" VOLUME = "volume" ACTIVE = "active" PAUSED = 'paused' STOPPED = "stopped" status_ACTIVE = 'ACTIVE' status_PAUSED = 'PAUSED' status_SHUTOFF = 'SHUTOFF' AVAILABLE = "available" IN_USE = "in-use" class ZoneMigration(base.ZoneMigrationBaseStrategy): """Zone migration using instance and volume migration This is zone migration strategy to migrate many instances and volumes efficiently with minimum downtime for hardware maintenance. """ def __init__(self, config, osc=None): super(ZoneMigration, self).__init__(config, osc) self._nova = None self._cinder = None self.live_count = 0 self.planned_live_count = 0 self.cold_count = 0 self.planned_cold_count = 0 self.volume_count = 0 self.planned_volume_count = 0 self.volume_update_count = 0 self.planned_volume_update_count = 0 @classmethod def get_name(cls): return "zone_migration" @classmethod def get_display_name(cls): return _("Zone migration") @classmethod def get_translatable_display_name(cls): return "Zone migration" @classmethod def get_schema(cls): return { "properties": { "compute_nodes": { "type": "array", "items": { "type": "object", "properties": { "src_node": { "description": "Compute node from which" " instances migrate", "type": "string" }, "dst_node": { "description": "Compute node to which " "instances migrate", "type": "string" } }, "required": ["src_node"], "additionalProperties": False } }, "storage_pools": { "type": "array", "items": { "type": "object", "properties": { "src_pool": { "description": "Storage pool from which" " volumes migrate", "type": "string" }, "dst_pool": { "description": "Storage pool to which" " volumes migrate", "type": "string" }, "src_type": { "description": "Volume type from which" " volumes migrate", "type": "string" }, "dst_type": { "description": "Volume type to which" " volumes migrate", "type": "string" } }, "required": ["src_pool", "src_type", "dst_type"], "additionalProperties": False } }, "parallel_total": { "description": "The number of actions to be run in" " parallel in total", "type": "integer", "minimum": 0, "default": 6 }, "parallel_per_node": { "description": "The number of actions to be run in" " parallel per compute node", "type": "integer", "minimum": 0, "default": 2 }, "parallel_per_pool": { "description": "The number of actions to be run in" " parallel per storage host", "type": "integer", "minimum": 0, "default": 2 }, "priority": { "description": "List prioritizes instances and volumes", "type": "object", "properties": { "project": { "type": "array", "items": {"type": "string"} }, "compute_node": { "type": "array", "items": {"type": "string"} }, "storage_pool": { "type": "array", "items": {"type": "string"} }, "compute": { "enum": ["vcpu_num", "mem_size", "disk_size", "created_at"] }, "storage": { "enum": ["size", "created_at"] } }, "additionalProperties": False }, "with_attached_volume": { "description": "instance migrates just after attached" " volumes or not", "type": "boolean", "default": False }, }, "additionalProperties": False } @property def migrate_compute_nodes(self): """Get compute nodes from input_parameters :returns: compute nodes e.g. 
[{"src_node": "w012", "dst_node": "w022"}, {"src_node": "w013", "dst_node": "w023"}] """ return self.input_parameters.get('compute_nodes') @property def migrate_storage_pools(self): """Get storage pools from input_parameters :returns: storage pools e.g. [ {"src_pool": "src1@back1#pool1", "dst_pool": "dst1@back1#pool1", "src_type": "src1_type", "dst_type": "dst1_type"}, {"src_pool": "src1@back2#pool1", "dst_pool": "dst1@back2#pool1", "src_type": "src1_type", "dst_type": "dst1_type"} ] """ return self.input_parameters.get('storage_pools') @property def parallel_total(self): return self.input_parameters.get('parallel_total') @property def parallel_per_node(self): return self.input_parameters.get('parallel_per_node') @property def parallel_per_pool(self): return self.input_parameters.get('parallel_per_pool') @property def priority(self): """Get priority from input_parameters :returns: priority map e.g. { "project": ["pj1"], "compute_node": ["compute1", "compute2"], "compute": ["vcpu_num"], "storage_pool": ["pool1", "pool2"], "storage": ["size", "created_at"] } """ return self.input_parameters.get('priority') @property def with_attached_volume(self): return self.input_parameters.get('with_attached_volume') @property def nova(self): if self._nova is None: self._nova = nova_helper.NovaHelper(osc=self.osc) return self._nova @property def cinder(self): if self._cinder is None: self._cinder = cinder_helper.CinderHelper(osc=self.osc) return self._cinder def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def get_available_storage_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] return {uuid: cn for uuid, cn in self.storage_model.get_all_storage_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def pre_execute(self): self._pre_execute() LOG.debug(self.storage_model.to_string()) def do_execute(self, audit=None): """Strategy execution phase """ filtered_targets = self.filtered_targets() self.set_migration_count(filtered_targets) total_limit = self.parallel_total per_node_limit = self.parallel_per_node per_pool_limit = self.parallel_per_pool action_counter = ActionCounter(total_limit, per_pool_limit, per_node_limit) for k, targets in iter(filtered_targets.items()): if k == VOLUME: self.volumes_migration(targets, action_counter) elif k == INSTANCE: if self.volume_count == 0 and self.volume_update_count == 0: # if with_attached_volume is true, # instance having attached volumes already migrated, # migrate instances which does not have attached volumes if self.with_attached_volume: targets = self.instances_no_attached(targets) self.instances_migration(targets, action_counter) else: self.instances_migration(targets, action_counter) LOG.debug("action total: %s, pools: %s, nodes %s ", action_counter.total_count, action_counter.per_pool_count, action_counter.per_node_count) def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.solution.set_efficacy_indicators( live_migrate_instance_count=self.live_count, planned_live_migrate_instance_count=self.planned_live_count, cold_migrate_instance_count=self.cold_count, planned_cold_migrate_instance_count=self.planned_cold_count, 
    def set_migration_count(self, targets):
        """Set migration counts

        :param targets: dict of an instance object list and a volume object
            list; the keys of the dict are "instance" and "volume"
        """
        for instance in targets.get('instance', []):
            if self.is_live(instance):
                self.live_count += 1
            elif self.is_cold(instance):
                self.cold_count += 1
        for volume in targets.get('volume', []):
            if self.is_available(volume):
                self.volume_count += 1
            elif self.is_in_use(volume):
                self.volume_update_count += 1

    def is_live(self, instance):
        status = getattr(instance, 'status')
        state = getattr(instance, 'OS-EXT-STS:vm_state')
        return (status == status_ACTIVE and state == ACTIVE
                ) or (status == status_PAUSED and state == PAUSED)

    def is_cold(self, instance):
        status = getattr(instance, 'status')
        state = getattr(instance, 'OS-EXT-STS:vm_state')
        return status == status_SHUTOFF and state == STOPPED

    def is_available(self, volume):
        return getattr(volume, 'status') == AVAILABLE

    def is_in_use(self, volume):
        return getattr(volume, 'status') == IN_USE

    def instances_no_attached(self, instances):
        return [i for i in instances
                if not getattr(i, "os-extended-volumes:volumes_attached")]

    def get_host_by_pool(self, pool):
        """Get host name from pool name

        Utility method to get the host name from a pool name, which is
        formatted as host@backend#pool.

        :param pool: pool name
        :returns: host name
        """
        return pool.split('@')[0]

    def get_dst_node(self, src_node):
        """Get destination node from self.migrate_compute_nodes

        :param src_node: compute node name
        :returns: destination node name
        """
        for node in self.migrate_compute_nodes:
            if node.get("src_node") == src_node:
                return node.get("dst_node")

    def get_dst_pool_and_type(self, src_pool, src_type):
        """Get destination pool and type from self.migrate_storage_pools

        :param src_pool: storage pool name
        :param src_type: storage volume type
        :returns: tuple of storage pool name and volume type name
        """
        for pool in self.migrate_storage_pools:
            if pool.get("src_pool") == src_pool:
                return (pool.get("dst_pool", None),
                        pool.get("dst_type"))

    def volumes_migration(self, volumes, action_counter):
        for volume in volumes:
            if action_counter.is_total_max():
                LOG.debug('Reached the total limit of parallel actions.')
                break

            pool = getattr(volume, 'os-vol-host-attr:host')
            if action_counter.is_pool_max(pool):
                LOG.debug("%s has objects to be migrated, but it has"
                          " reached the limit of parallelization.", pool)
                continue

            src_type = volume.volume_type
            dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
            LOG.debug(src_type)
            LOG.debug("%s %s", dst_pool, dst_type)

            if self.is_available(volume):
                if src_type == dst_type:
                    self._volume_migrate(volume, dst_pool)
                else:
                    self._volume_retype(volume, dst_type)
            elif self.is_in_use(volume):
                self._volume_update(volume, dst_type)

            # If with_attached_volume is True, also migrate the instances
            # attached to this volume.
            if self.with_attached_volume:
                instances = [self.nova.find_instance(dic.get('server_id'))
                             for dic in volume.attachments]
                self.instances_migration(instances, action_counter)

            action_counter.add_pool(pool)

    def instances_migration(self, instances, action_counter):
        for instance in instances:
            src_node = getattr(instance, 'OS-EXT-SRV-ATTR:host')

            if action_counter.is_total_max():
                LOG.debug('Reached the total limit of parallel actions.')
                break

            if action_counter.is_node_max(src_node):
                LOG.debug("%s has objects to be migrated, but it has"
                          " reached the limit of parallelization.", src_node)
                continue
dst_node = self.get_dst_node(src_node) if self.is_live(instance): self._live_migration(instance, src_node, dst_node) elif self.is_cold(instance): self._cold_migration(instance, src_node, dst_node) action_counter.add_node(src_node) def _live_migration(self, instance, src_node, dst_node): parameters = {"migration_type": "live", "destination_node": dst_node, "source_node": src_node, "resource_name": instance.name} self.solution.add_action( action_type="migrate", resource_id=instance.id, input_parameters=parameters) self.planned_live_count += 1 def _cold_migration(self, instance, src_node, dst_node): parameters = {"migration_type": "cold", "destination_node": dst_node, "source_node": src_node, "resource_name": instance.name} self.solution.add_action( action_type="migrate", resource_id=instance.id, input_parameters=parameters) self.planned_cold_count += 1 def _volume_update(self, volume, dst_type): parameters = {"migration_type": "swap", "destination_type": dst_type, "resource_name": volume.name} self.solution.add_action( action_type="volume_migrate", resource_id=volume.id, input_parameters=parameters) self.planned_volume_update_count += 1 def _volume_migrate(self, volume, dst_pool): parameters = {"migration_type": "migrate", "destination_node": dst_pool, "resource_name": volume.name} self.solution.add_action( action_type="volume_migrate", resource_id=volume.id, input_parameters=parameters) self.planned_volume_count += 1 def _volume_retype(self, volume, dst_type): parameters = {"migration_type": "retype", "destination_type": dst_type, "resource_name": volume.name} self.solution.add_action( action_type="volume_migrate", resource_id=volume.id, input_parameters=parameters) self.planned_volume_count += 1 def get_src_node_list(self): """Get src nodes from migrate_compute_nodes :returns: src node name list """ if not self.migrate_compute_nodes: return None return [v for dic in self.migrate_compute_nodes for k, v in dic.items() if k == "src_node"] def get_src_pool_list(self): """Get src pools from migrate_storage_pools :returns: src pool name list """ return [v for dic in self.migrate_storage_pools for k, v in dic.items() if k == "src_pool"] def get_instances(self): """Get migrate target instances :returns: instance list on src nodes and compute scope """ src_node_list = self.get_src_node_list() if not src_node_list: return None return [i for i in self.nova.get_instance_list() if getattr(i, 'OS-EXT-SRV-ATTR:host') in src_node_list and self.compute_model.get_instance_by_uuid(i.id)] def get_volumes(self): """Get migrate target volumes :returns: volume list on src pools and storage scope """ src_pool_list = self.get_src_pool_list() return [i for i in self.cinder.get_volume_list() if getattr(i, 'os-vol-host-attr:host') in src_pool_list and self.storage_model.get_volume_by_uuid(i.id)] def filtered_targets(self): """Filter targets prioritize instances and volumes based on priorities from input parameters. 
        :returns: prioritized targets
        """
        result = {}
        if self.migrate_compute_nodes:
            result["instance"] = self.get_instances()
        if self.migrate_storage_pools:
            result["volume"] = self.get_volumes()
        if not self.priority:
            return result
        filter_actions = self.get_priority_filter_list()
        LOG.debug(filter_actions)

        # apply all filters set in the input parameters
        for action in list(reversed(filter_actions)):
            LOG.debug(action)
            result = action.apply_filter(result)

        return result

    def get_priority_filter_list(self):
        """Get priority filters

        :returns: list of filter objects with arguments from self.priority
        """
        filter_list = []
        priority_filter_map = self.get_priority_filter_map()
        for k, v in iter(self.priority.items()):
            if k in priority_filter_map:
                filter_list.append(priority_filter_map[k](v))
        return filter_list

    def get_priority_filter_map(self):
        """Get priority filter map

        :returns: filter map. The key is the key in the priority input
                  parameters; the value is the filter class used for
                  prioritizing.
        """
        return {
            "project": ProjectSortFilter,
            "compute_node": ComputeHostSortFilter,
            "storage_pool": StorageHostSortFilter,
            "compute": ComputeSpecSortFilter,
            "storage": StorageSpecSortFilter,
        }


class ActionCounter(object):
    """Manage the number of actions run in parallel"""

    def __init__(self, total_limit=6, per_pool_limit=2, per_node_limit=2):
        """Initialize the per-host dicts and the numbers of actions

        :param total_limit: total number of actions
        :param per_pool_limit: the number of migrate actions per storage pool
        :param per_node_limit: the number of migrate actions per compute node
        """
        self.total_limit = total_limit
        self.per_pool_limit = per_pool_limit
        self.per_node_limit = per_node_limit
        self.per_pool_count = {}
        self.per_node_count = {}
        self.total_count = 0

    def add_pool(self, pool):
        """Increment the number of actions on the pool and the total count

        :param pool: storage pool
        :returns: True if incremented, False otherwise
        """
        if pool not in self.per_pool_count:
            self.per_pool_count[pool] = 0
        if not self.is_total_max() and not self.is_pool_max(pool):
            self.per_pool_count[pool] += 1
            self.total_count += 1
            LOG.debug("total: %s, per_pool: %s",
                      self.total_count, self.per_pool_count)
            return True
        return False

    def add_node(self, node):
        """Increment the number of actions on the node and the total count

        :param node: compute node
        :returns: True if incremented, False otherwise
        """
        if node not in self.per_node_count:
            self.per_node_count[node] = 0
        if not self.is_total_max() and not self.is_node_max(node):
            self.per_node_count[node] += 1
            self.total_count += 1
            LOG.debug("total: %s, per_node: %s",
                      self.total_count, self.per_node_count)
            return True
        return False

    def is_total_max(self):
        """Check if the total count has reached its limit

        :returns: True if the total count has reached the limit,
                  False otherwise
        """
        return self.total_count >= self.total_limit

    def is_pool_max(self, pool):
        """Check if the per-pool count has reached its limit

        :returns: True if the count has reached the limit, False otherwise
        """
        if pool not in self.per_pool_count:
            self.per_pool_count[pool] = 0
        LOG.debug("the number of parallel actions on pool %s is %s ",
                  pool, self.per_pool_count[pool])
        LOG.debug("per pool limit is %s", self.per_pool_limit)
        return self.per_pool_count[pool] >= self.per_pool_limit

    def is_node_max(self, node):
        """Check if the per-node count has reached its limit

        :returns: True if the count has reached the limit, False otherwise
        """
        if node not in self.per_node_count:
            self.per_node_count[node] = 0
        return self.per_node_count[node] >= self.per_node_limit
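# Usage sketch (illustrative only, not part of the upstream module): with
# the defaults total_limit=6, per_pool_limit=2 and per_node_limit=2:
#
#     counter = ActionCounter()
#     counter.add_node('compute1')      # True  -> total 1, compute1: 1
#     counter.add_node('compute1')      # True  -> total 2, compute1: 2
#     counter.add_node('compute1')      # False -> per-node limit reached
#     counter.is_node_max('compute1')   # True
#
# The strategy uses exactly this mechanism to skip hosts and pools that
# already have the maximum number of planned parallel migrations.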
class BaseFilter(object):
    """Base class for filters"""

    apply_targets = ('ALL',)

    def __init__(self, values=[], **kwargs):
        """Initialization

        :param values: priority value
        """
        if not isinstance(values, list):
            values = [values]
        self.condition = values

    def apply_filter(self, targets):
        """Apply the filter to targets

        :param targets: dict of an instance object list and a volume object
            list; the keys of the dict are "instance" and "volume"
        """
        if not targets:
            return {}
        for cond in list(reversed(self.condition)):
            for k, v in iter(targets.items()):
                if not self.is_allowed(k):
                    continue
                LOG.debug("filter:%s with the key: %s", cond, k)
                targets[k] = self.exec_filter(v, cond)
        LOG.debug(targets)
        return targets

    def is_allowed(self, key):
        return (key in self.apply_targets) or ('ALL' in self.apply_targets)

    def exec_filter(self, items, sort_key):
        """This is implemented by each subclass"""
        return items


class SortMovingToFrontFilter(BaseFilter):
    """Moves an item to the front if a condition is True"""

    def exec_filter(self, items, sort_key):
        return self.sort_moving_to_front(items, sort_key,
                                         self.compare_func)

    def sort_moving_to_front(self, items, sort_key=None, compare_func=None):
        if not compare_func or not sort_key:
            return items
        for item in list(reversed(items)):
            if compare_func(item, sort_key):
                items.remove(item)
                items.insert(0, item)
        return items

    def compare_func(self, item, sort_key):
        return True


class ProjectSortFilter(SortMovingToFrontFilter):
    """ProjectSortFilter"""

    apply_targets = ('instance', 'volume')

    def __init__(self, values=[], **kwargs):
        super(ProjectSortFilter, self).__init__(values, **kwargs)

    def compare_func(self, item, sort_key):
        """Compare the project id of the item with sort_key

        :param item: instance object or volume object
        :param sort_key: project id
        :returns: true if the project id of the item equals sort_key,
                  false otherwise
        """
        project_id = self.get_project_id(item)
        LOG.debug("project_id: %s, sort_key: %s", project_id, sort_key)
        return project_id == sort_key

    def get_project_id(self, item):
        """Get the project id of the item

        :param item: instance object or volume object
        :returns: project id
        """
        if isinstance(item, Volume):
            return getattr(item, 'os-vol-tenant-attr:tenant_id')
        elif isinstance(item, Server):
            return item.tenant_id


class ComputeHostSortFilter(SortMovingToFrontFilter):
    """ComputeHostSortFilter"""

    apply_targets = ('instance',)

    def __init__(self, values=[], **kwargs):
        super(ComputeHostSortFilter, self).__init__(values, **kwargs)

    def compare_func(self, item, sort_key):
        """Compare the host of the item with sort_key

        :param item: instance object
        :param sort_key: compute host name
        :returns: true if the host on which the instance runs equals
                  sort_key, false otherwise
        """
        host = self.get_host(item)
        LOG.debug("host: %s, sort_key: %s", host, sort_key)
        return host == sort_key

    def get_host(self, item):
        """Get the hostname the item runs on

        :param item: instance object
        :returns: hostname the item runs on
        """
        return getattr(item, 'OS-EXT-SRV-ATTR:host')


class StorageHostSortFilter(SortMovingToFrontFilter):
    """StorageHostSortFilter"""

    apply_targets = ('volume',)

    def compare_func(self, item, sort_key):
        """Compare the storage pool of the item with sort_key

        :param item: volume object
        :param sort_key: storage pool name
        :returns: true if the pool hosting the volume equals sort_key,
                  false otherwise
        """
        host = self.get_host(item)
        LOG.debug("host: %s, sort_key: %s", host, sort_key)
        return host == sort_key

    def get_host(self, item):
        return getattr(item, 'os-vol-host-attr:host')


class ComputeSpecSortFilter(BaseFilter):
    """ComputeSpecSortFilter"""

    apply_targets = ('instance',)
    accept_keys = ['vcpu_num', 'mem_size', 'disk_size', 'created_at']

    def __init__(self, values=[], **kwargs):
        super(ComputeSpecSortFilter,
self).__init__(values, **kwargs) self._nova = None @property def nova(self): if self._nova is None: self._nova = nova_helper.NovaHelper() return self._nova def exec_filter(self, items, sort_key): result = items if sort_key not in self.accept_keys: LOG.warning("Invalid key is specified: %s", sort_key) else: result = self.get_sorted_items(items, sort_key) return result def get_sorted_items(self, items, sort_key): """Sort items by sort_key :param items: instances :param sort_key: sort_key :returns: items sorted by sort_key """ result = items flavors = self.nova.get_flavor_list() if sort_key == 'mem_size': result = sorted(items, key=lambda x: float(self.get_mem_size(x, flavors)), reverse=True) elif sort_key == 'vcpu_num': result = sorted(items, key=lambda x: float(self.get_vcpu_num(x, flavors)), reverse=True) elif sort_key == 'disk_size': result = sorted(items, key=lambda x: float( self.get_disk_size(x, flavors)), reverse=True) elif sort_key == 'created_at': result = sorted(items, key=lambda x: parse(getattr(x, sort_key)), reverse=False) return result def get_mem_size(self, item, flavors): """Get memory size of item :param item: instance :param flavors: flavors :returns: memory size of item """ LOG.debug("item: %s, flavors: %s", item, flavors) for flavor in flavors: LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor) if item.flavor.get('id') == flavor.id: LOG.debug("flavor.ram: %s", flavor.ram) return flavor.ram def get_vcpu_num(self, item, flavors): """Get vcpu number of item :param item: instance :param flavors: flavors :returns: vcpu number of item """ LOG.debug("item: %s, flavors: %s", item, flavors) for flavor in flavors: LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor) if item.flavor.get('id') == flavor.id: LOG.debug("flavor.vcpus: %s", flavor.vcpus) return flavor.vcpus def get_disk_size(self, item, flavors): """Get disk size of item :param item: instance :param flavors: flavors :returns: disk size of item """ LOG.debug("item: %s, flavors: %s", item, flavors) for flavor in flavors: LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor) if item.flavor.get('id') == flavor.id: LOG.debug("flavor.disk: %s", flavor.disk) return flavor.disk class StorageSpecSortFilter(BaseFilter): """StorageSpecSortFilter""" apply_targets = ('volume',) accept_keys = ['size', 'created_at'] def exec_filter(self, items, sort_key): result = items if sort_key not in self.accept_keys: LOG.warning("Invalid key is specified: %s", sort_key) return result if sort_key == 'created_at': result = sorted(items, key=lambda x: parse(getattr(x, sort_key)), reverse=False) else: result = sorted(items, key=lambda x: float(getattr(x, sort_key)), reverse=True) LOG.debug(result) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/decision_engine/sync.py0000664000175000017500000006135000000000000022767 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import collections

from oslo_log import log

from watcher.common import context
from watcher.decision_engine.loading import default
from watcher.decision_engine.scoring import scoring_factory
from watcher import objects

LOG = log.getLogger(__name__)

GoalMapping = collections.namedtuple(
    'GoalMapping', ['name', 'display_name', 'efficacy_specification'])
StrategyMapping = collections.namedtuple(
    'StrategyMapping',
    ['name', 'goal_name', 'display_name', 'parameters_spec'])
ScoringEngineMapping = collections.namedtuple(
    'ScoringEngineMapping', ['name', 'description', 'metainfo'])

IndicatorSpec = collections.namedtuple(
    'IndicatorSpec', ['name', 'description', 'unit', 'schema'])


class Syncer(object):
    """Syncs all available goals and strategies with the Watcher DB"""

    def __init__(self):
        self.ctx = context.make_context()
        self.discovered_map = None

        self._available_goals = None
        self._available_goals_map = None

        self._available_strategies = None
        self._available_strategies_map = None

        self._available_scoringengines = None
        self._available_scoringengines_map = None

        # This goal mapping maps stale goal IDs to the synced goal
        self.goal_mapping = dict()
        # This strategy mapping maps stale strategy IDs to the synced
        # strategy
        self.strategy_mapping = dict()
        # Maps stale scoring engine IDs to the synced scoring engines
        self.se_mapping = dict()

        self.stale_audit_templates_map = {}
        self.stale_audits_map = {}
        self.stale_action_plans_map = {}

    @property
    def available_goals(self):
        """Goals loaded from DB"""
        if self._available_goals is None:
            self._available_goals = objects.Goal.list(self.ctx)
        return self._available_goals

    @property
    def available_strategies(self):
        """Strategies loaded from DB"""
        if self._available_strategies is None:
            self._available_strategies = objects.Strategy.list(self.ctx)
            goal_ids = [g.id for g in self.available_goals]
            stale_strategies = [s for s in self._available_strategies
                                if s.goal_id not in goal_ids]
            for s in stale_strategies:
                LOG.info("Can't find Goal id %d of strategy %s",
                         s.goal_id, s.name)
                s.soft_delete()
                self._available_strategies.remove(s)
        return self._available_strategies

    @property
    def available_scoringengines(self):
        """Scoring Engines loaded from DB"""
        if self._available_scoringengines is None:
            self._available_scoringengines = (objects.ScoringEngine
                                              .list(self.ctx))
        return self._available_scoringengines

    @property
    def available_goals_map(self):
        """Mapping of goals loaded from DB"""
        if self._available_goals_map is None:
            self._available_goals_map = {
                GoalMapping(
                    name=g.name,
                    display_name=g.display_name,
                    efficacy_specification=tuple(
                        IndicatorSpec(**item)
                        for item in g.efficacy_specification)): g
                for g in self.available_goals
            }
        return self._available_goals_map

    @property
    def available_strategies_map(self):
        if self._available_strategies_map is None:
            goals_map = {g.id: g.name for g in self.available_goals}
            self._available_strategies_map = {
                StrategyMapping(
                    name=s.name, goal_name=goals_map[s.goal_id],
                    display_name=s.display_name,
                    parameters_spec=str(s.parameters_spec)): s
                for s in self.available_strategies
            }
        return self._available_strategies_map

    @property
    def available_scoringengines_map(self):
        if self._available_scoringengines_map is None:
            self._available_scoringengines_map = {
                ScoringEngineMapping(
                    name=s.id,
                    description=s.description,
                    metainfo=s.metainfo): s
                for s in self.available_scoringengines
            }
        return self._available_scoringengines_map

    def sync(self):
        self.discovered_map =
self._discover() goals_map = self.discovered_map["goals"] strategies_map = self.discovered_map["strategies"] scoringengines_map = self.discovered_map["scoringengines"] for goal_name, goal_map in goals_map.items(): if goal_map in self.available_goals_map: LOG.info("Goal %s already exists", goal_name) continue self.goal_mapping.update(self._sync_goal(goal_map)) for strategy_name, strategy_map in strategies_map.items(): if (strategy_map in self.available_strategies_map and strategy_map.goal_name not in [g.name for g in self.goal_mapping.values()]): LOG.info("Strategy %s already exists", strategy_name) continue self.strategy_mapping.update(self._sync_strategy(strategy_map)) for se_name, se_map in scoringengines_map.items(): if se_map in self.available_scoringengines_map: LOG.info("Scoring Engine %s already exists", se_name) continue self.se_mapping.update(self._sync_scoringengine(se_map)) self._sync_objects() self._soft_delete_removed_scoringengines() def _sync_goal(self, goal_map): goal_name = goal_map.name goal_mapping = dict() # Goals that are matching by name with the given discovered goal name matching_goals = [g for g in self.available_goals if g.name == goal_name] stale_goals = self._soft_delete_stale_goals(goal_map, matching_goals) if stale_goals or not matching_goals: goal = objects.Goal(self.ctx) goal.name = goal_name goal.display_name = goal_map.display_name goal.efficacy_specification = [ indicator._asdict() for indicator in goal_map.efficacy_specification] goal.create() LOG.info("Goal %s created", goal_name) # Updating the internal states self.available_goals_map[goal] = goal_map # Map the old goal IDs to the new (equivalent) goal for matching_goal in matching_goals: goal_mapping[matching_goal.id] = goal return goal_mapping def _sync_strategy(self, strategy_map): strategy_name = strategy_map.name strategy_display_name = strategy_map.display_name goal_name = strategy_map.goal_name parameters_spec = strategy_map.parameters_spec strategy_mapping = dict() # Strategies that are matching by name with the given # discovered strategy name matching_strategies = [s for s in self.available_strategies if s.name == strategy_name] stale_strategies = self._soft_delete_stale_strategies( strategy_map, matching_strategies) if stale_strategies or not matching_strategies: strategy = objects.Strategy(self.ctx) strategy.name = strategy_name strategy.display_name = strategy_display_name strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id strategy.parameters_spec = parameters_spec strategy.create() LOG.info("Strategy %s created", strategy_name) # Updating the internal states self.available_strategies_map[strategy] = strategy_map # Map the old strategy IDs to the new (equivalent) strategy for matching_strategy in matching_strategies: strategy_mapping[matching_strategy.id] = strategy return strategy_mapping def _sync_scoringengine(self, scoringengine_map): scoringengine_name = scoringengine_map.name se_mapping = dict() # Scoring Engines matching by id with discovered Scoring engine matching_scoringengines = [se for se in self.available_scoringengines if se.name == scoringengine_name] stale_scoringengines = self._soft_delete_stale_scoringengines( scoringengine_map, matching_scoringengines) if stale_scoringengines or not matching_scoringengines: scoringengine = objects.ScoringEngine(self.ctx) scoringengine.name = scoringengine_name scoringengine.description = scoringengine_map.description scoringengine.metainfo = scoringengine_map.metainfo scoringengine.create() LOG.info("Scoring Engine %s 
created", scoringengine_name) # Updating the internal states self.available_scoringengines_map[scoringengine] = \ scoringengine_map # Map the old scoring engine names to the new (equivalent) SE for matching_scoringengine in matching_scoringengines: se_mapping[matching_scoringengine.name] = scoringengine return se_mapping def _sync_objects(self): # First we find audit templates, audits and action plans that are stale # because their associated goal or strategy has been modified and we # update them in-memory self._find_stale_audit_templates_due_to_goal() self._find_stale_audit_templates_due_to_strategy() self._find_stale_audits_due_to_goal() self._find_stale_audits_due_to_strategy() self._find_stale_action_plans_due_to_strategy() self._find_stale_action_plans_due_to_audit() # Then we handle the case where an audit template, an audit or an # action plan becomes stale because its related goal does not # exist anymore. self._soft_delete_removed_goals() # Then we handle the case where an audit template, an audit or an # action plan becomes stale because its related strategy does not # exist anymore. self._soft_delete_removed_strategies() # Finally, we save into the DB the updated stale audit templates # and soft delete stale audits and action plans for stale_audit_template in self.stale_audit_templates_map.values(): stale_audit_template.save() LOG.info("Audit Template '%s' synced", stale_audit_template.name) for stale_audit in self.stale_audits_map.values(): stale_audit.save() LOG.info("Stale audit '%s' synced and cancelled", stale_audit.uuid) for stale_action_plan in self.stale_action_plans_map.values(): stale_action_plan.save() LOG.info("Stale action plan '%s' synced and cancelled", stale_action_plan.uuid) def _find_stale_audit_templates_due_to_goal(self): for goal_id, synced_goal in self.goal_mapping.items(): filters = {"goal_id": goal_id} stale_audit_templates = objects.AuditTemplate.list( self.ctx, filters=filters) # Update the goal ID for the stale audit templates (w/o saving) for audit_template in stale_audit_templates: if audit_template.id not in self.stale_audit_templates_map: audit_template.goal_id = synced_goal.id self.stale_audit_templates_map[audit_template.id] = ( audit_template) else: self.stale_audit_templates_map[ audit_template.id].goal_id = synced_goal.id def _find_stale_audit_templates_due_to_strategy(self): for strategy_id, synced_strategy in self.strategy_mapping.items(): filters = {"strategy_id": strategy_id} stale_audit_templates = objects.AuditTemplate.list( self.ctx, filters=filters) # Update strategy IDs for all stale audit templates (w/o saving) for audit_template in stale_audit_templates: if audit_template.id not in self.stale_audit_templates_map: audit_template.strategy_id = synced_strategy.id self.stale_audit_templates_map[audit_template.id] = ( audit_template) else: self.stale_audit_templates_map[ audit_template.id].strategy_id = synced_strategy.id def _find_stale_audits_due_to_goal(self): for goal_id, synced_goal in self.goal_mapping.items(): filters = {"goal_id": goal_id} stale_audits = objects.Audit.list( self.ctx, filters=filters, eager=True) # Update the goal ID for the stale audits (w/o saving) for audit in stale_audits: if audit.id not in self.stale_audits_map: audit.goal_id = synced_goal.id self.stale_audits_map[audit.id] = audit else: self.stale_audits_map[audit.id].goal_id = synced_goal.id def _find_stale_audits_due_to_strategy(self): for strategy_id, synced_strategy in self.strategy_mapping.items(): filters = {"strategy_id": strategy_id} stale_audits = 
objects.Audit.list(
                self.ctx, filters=filters, eager=True)

            # Update strategy IDs for all stale audits (w/o saving)
            for audit in stale_audits:
                if audit.id not in self.stale_audits_map:
                    audit.strategy_id = synced_strategy.id
                    audit.state = objects.audit.State.CANCELLED
                    self.stale_audits_map[audit.id] = audit
                else:
                    self.stale_audits_map[
                        audit.id].strategy_id = synced_strategy.id
                    self.stale_audits_map[
                        audit.id].state = objects.audit.State.CANCELLED

    def _find_stale_action_plans_due_to_strategy(self):
        for strategy_id, synced_strategy in self.strategy_mapping.items():
            filters = {"strategy_id": strategy_id}
            stale_action_plans = objects.ActionPlan.list(
                self.ctx, filters=filters, eager=True)

            # Update strategy IDs for all stale action plans (w/o saving)
            for action_plan in stale_action_plans:
                if action_plan.id not in self.stale_action_plans_map:
                    action_plan.strategy_id = synced_strategy.id
                    action_plan.state = objects.action_plan.State.CANCELLED
                    self.stale_action_plans_map[action_plan.id] = action_plan
                else:
                    self.stale_action_plans_map[
                        action_plan.id].strategy_id = synced_strategy.id
                    self.stale_action_plans_map[
                        action_plan.id].state = (
                            objects.action_plan.State.CANCELLED)

    def _find_stale_action_plans_due_to_audit(self):
        for audit_id, synced_audit in self.stale_audits_map.items():
            filters = {"audit_id": audit_id}
            stale_action_plans = objects.ActionPlan.list(
                self.ctx, filters=filters, eager=True)

            # Update audit IDs for all stale action plans (w/o saving)
            for action_plan in stale_action_plans:
                if action_plan.id not in self.stale_action_plans_map:
                    action_plan.audit_id = synced_audit.id
                    action_plan.state = objects.action_plan.State.CANCELLED
                    self.stale_action_plans_map[action_plan.id] = action_plan
                else:
                    self.stale_action_plans_map[
                        action_plan.id].audit_id = synced_audit.id
                    self.stale_action_plans_map[
                        action_plan.id].state = (
                            objects.action_plan.State.CANCELLED)

    def _soft_delete_removed_goals(self):
        removed_goals = [
            g for g in self.available_goals
            if g.name not in self.discovered_map['goals']]
        for removed_goal in removed_goals:
            removed_goal.soft_delete()
            filters = {"goal_id": removed_goal.id}

            invalid_ats = objects.AuditTemplate.list(self.ctx,
                                                     filters=filters)
            for at in invalid_ats:
                LOG.warning(
                    "Audit Template '%(audit_template)s' references a "
                    "goal that does not exist",
                    {'audit_template': at.uuid})

            stale_audits = objects.Audit.list(
                self.ctx, filters=filters, eager=True)
            for audit in stale_audits:
                LOG.warning(
                    "Audit '%(audit)s' references a "
                    "goal that does not exist", {'audit': audit.uuid})
                if audit.id not in self.stale_audits_map:
                    audit.state = objects.audit.State.CANCELLED
                    self.stale_audits_map[audit.id] = audit
                else:
                    self.stale_audits_map[
                        audit.id].state = objects.audit.State.CANCELLED
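    # Illustrative note (not part of the upstream module): at this stage
    # goal_mapping and strategy_mapping map stale database ids to the
    # freshly created rows, e.g. (ids and names invented):
    #
    #     goal_mapping = {3: <Goal id=7 name='server_consolidation'>}
    #
    # which is what lets the _find_stale_* helpers above re-point audit
    # templates, audits and action plans at the synced rows.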
    def _soft_delete_removed_strategies(self):
        removed_strategies = [
            s for s in self.available_strategies
            if s.name not in self.discovered_map['strategies']]

        for removed_strategy in removed_strategies:
            removed_strategy.soft_delete()
            filters = {"strategy_id": removed_strategy.id}
            invalid_ats = objects.AuditTemplate.list(self.ctx,
                                                     filters=filters)
            for at in invalid_ats:
                LOG.info(
                    "Audit Template '%(audit_template)s' references a "
                    "strategy that does not exist",
                    {'audit_template': at.uuid})
                # In this case we can reset the strategy ID to None
                # so the audit template can still achieve the same goal
                # but with a different strategy
                if at.id not in self.stale_audit_templates_map:
                    at.strategy_id = None
                    self.stale_audit_templates_map[at.id] = at
                else:
                    self.stale_audit_templates_map[at.id].strategy_id = None

            stale_audits = objects.Audit.list(
                self.ctx, filters=filters, eager=True)
            for audit in stale_audits:
                LOG.warning(
                    "Audit '%(audit)s' references a "
                    "strategy that does not exist", {'audit': audit.uuid})
                if audit.id not in self.stale_audits_map:
                    audit.state = objects.audit.State.CANCELLED
                    self.stale_audits_map[audit.id] = audit
                else:
                    self.stale_audits_map[
                        audit.id].state = objects.audit.State.CANCELLED

            stale_action_plans = objects.ActionPlan.list(
                self.ctx, filters=filters, eager=True)
            for action_plan in stale_action_plans:
                LOG.warning(
                    "Action Plan '%(action_plan)s' references a "
                    "strategy that does not exist",
                    {'action_plan': action_plan.uuid})
                if action_plan.id not in self.stale_action_plans_map:
                    action_plan.state = objects.action_plan.State.CANCELLED
                    self.stale_action_plans_map[action_plan.id] = action_plan
                else:
                    self.stale_action_plans_map[
                        action_plan.id].state = (
                            objects.action_plan.State.CANCELLED)

    def _soft_delete_removed_scoringengines(self):
        removed_se = [
            se for se in self.available_scoringengines
            if se.name not in self.discovered_map['scoringengines']]
        for se in removed_se:
            LOG.info("Scoring Engine %s removed", se.name)
            se.soft_delete()

    def _discover(self):
        strategies_map = {}
        goals_map = {}
        scoringengines_map = {}
        discovered_map = {
            "goals": goals_map,
            "strategies": strategies_map,
            "scoringengines": scoringengines_map}
        goal_loader = default.DefaultGoalLoader()
        implemented_goals = goal_loader.list_available()

        strategy_loader = default.DefaultStrategyLoader()
        implemented_strategies = strategy_loader.list_available()

        for goal_cls in implemented_goals.values():
            goals_map[goal_cls.get_name()] = GoalMapping(
                name=goal_cls.get_name(),
                display_name=goal_cls.get_translatable_display_name(),
                efficacy_specification=tuple(
                    IndicatorSpec(**indicator.to_dict())
                    for indicator in goal_cls.get_efficacy_specification(
                    ).get_indicators_specifications()))

        for strategy_cls in implemented_strategies.values():
            strategies_map[strategy_cls.get_name()] = StrategyMapping(
                name=strategy_cls.get_name(),
                goal_name=strategy_cls.get_goal_name(),
                display_name=strategy_cls.get_translatable_display_name(),
                parameters_spec=str(strategy_cls.get_schema()))

        for se in scoring_factory.get_scoring_engine_list():
            scoringengines_map[se.get_name()] = ScoringEngineMapping(
                name=se.get_name(),
                description=se.get_description(),
                metainfo=se.get_metainfo())

        return discovered_map

    def _soft_delete_stale_goals(self, goal_map, matching_goals):
        """Soft delete the stale goals

        :param goal_map: discovered goal map
        :type goal_map: :py:class:`~.GoalMapping` instance
        :param matching_goals: list of DB goals matching the goal_map
        :type matching_goals: list of :py:class:`~.objects.Goal` instances
        :returns: A list of soft deleted DB goals (subset of matching goals)
        :rtype: list of :py:class:`~.objects.Goal` instances
        """
        goal_display_name = goal_map.display_name
        goal_name = goal_map.name
        goal_efficacy_spec = goal_map.efficacy_specification

        stale_goals = []
        for matching_goal in matching_goals:
            if (matching_goal.efficacy_specification == goal_efficacy_spec
                    and matching_goal.display_name == goal_display_name):
                LOG.info("Goal %s unchanged", goal_name)
            else:
                LOG.info("Goal %s modified", goal_name)
                matching_goal.soft_delete()
                stale_goals.append(matching_goal)

        return stale_goals
    def _soft_delete_stale_strategies(self, strategy_map,
                                      matching_strategies):
        strategy_name = strategy_map.name
        strategy_display_name = strategy_map.display_name
        parameters_spec = strategy_map.parameters_spec

        stale_strategies = []
        for matching_strategy in matching_strategies:
            if (matching_strategy.display_name == strategy_display_name
                    and matching_strategy.goal_id not in self.goal_mapping
                    and matching_strategy.parameters_spec ==
                    ast.literal_eval(parameters_spec)):
                LOG.info("Strategy %s unchanged", strategy_name)
            else:
                LOG.info("Strategy %s modified", strategy_name)
                matching_strategy.soft_delete()
                stale_strategies.append(matching_strategy)

        return stale_strategies

    def _soft_delete_stale_scoringengines(
            self, scoringengine_map, matching_scoringengines):
        se_name = scoringengine_map.name
        se_description = scoringengine_map.description
        se_metainfo = scoringengine_map.metainfo

        stale_scoringengines = []
        for matching_scoringengine in matching_scoringengines:
            if (matching_scoringengine.description == se_description and
                    matching_scoringengine.metainfo == se_metainfo):
                LOG.info("Scoring Engine %s unchanged", se_name)
            else:
                LOG.info("Scoring Engine %s modified", se_name)
                matching_scoringengine.soft_delete()
                stale_scoringengines.append(matching_scoringengine)

        return stale_scoringengines
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/decision_engine/threading.py0000664000175000017500000000734500000000000023760 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2019 European Organization for Nuclear Research (CERN)
#
# Authors: Corne Lukken
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import futurist
from futurist import waiters

from oslo_config import cfg
from oslo_log import log
from oslo_service import service

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class DecisionEngineThreadPool(object, metaclass=service.Singleton):
    """Singleton threadpool to submit general tasks to"""

    def __init__(self):
        self.amount_workers = CONF.watcher_decision_engine.max_general_workers
        self._threadpool = futurist.GreenThreadPoolExecutor(
            max_workers=self.amount_workers)

    def submit(self, fn, *args, **kwargs):
        """Will submit the job to the underlying threadpool

        :param fn: function to execute in another thread
        :param args: positional arguments for the function
        :param kwargs: keyword arguments for the function
        :return: future to monitor progress of execution
        :rtype: :py:class:`futurist.GreenFuture`
        """

        return self._threadpool.submit(fn, *args, **kwargs)

    @staticmethod
    def do_while_futures(futures, fn, *args, **kwargs):
        """Run a function as each future in a collection completes

        Will execute the specified function with its arguments when one of
        the futures from the passed collection finishes. Additionally, the
        future is passed as first argument to the function.

        Does not modify the passed collection of futures.

        :param futures: list, set or dictionary of futures
        :type futures: list :py:class:`futurist.GreenFuture`
        :param fn: function to execute upon the future finishing execution
        :param args: positional arguments for the function
        :param kwargs: keyword arguments for the function
        """

        # Shallow copy the collection so it is not modified outside of this
        # method. A shallow copy must be used because the type of the
        # collection needs to be determined at runtime (it can be a list, a
        # set or a dict).
        futures = copy.copy(futures)
        DecisionEngineThreadPool.do_while_futures_modify(
            futures, fn, *args, **kwargs)
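    # Usage sketch (illustrative only; ``collectors`` is an invented name):
    #
    #     pool = DecisionEngineThreadPool()
    #     futures = [pool.submit(c.execute) for c in collectors]
    #     pool.do_while_futures(
    #         futures, lambda future: LOG.debug(future.result()))
    #
    # Each future is handed to the callback as soon as it completes, so
    # results are processed in completion order rather than submission order.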
    @staticmethod
    def do_while_futures_modify(futures, fn, *args, **kwargs):
        """Run a function as each future in a collection completes

        Will execute the specified function with its arguments when one of
        the futures from the passed collection finishes. Additionally, the
        future is passed as first argument to the function.

        Modifies the passed collection by removing completed elements.

        :param futures: list, set or dictionary of futures
        :type futures: list :py:class:`futurist.GreenFuture`
        :param fn: function to execute upon the future finishing execution
        :param args: positional arguments for the function
        :param kwargs: keyword arguments for the function
        """

        waits = waiters.wait_for_any(futures)
        while len(waits[0]) > 0 or len(waits[1]) > 0:
            for future in waiters.wait_for_any(futures)[0]:
                fn(future, *args, **kwargs)
                futures.remove(future)
            waits = waiters.wait_for_any(futures)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/eventlet.py0000664000175000017500000000252500000000000020516 0ustar00zuulzuul00000000000000# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

MONKEY_PATCHED = False


def is_patched():
    return MONKEY_PATCHED


def _monkey_patch():
    if is_patched():
        return
    # Anything imported here will not be monkey patched. It is
    # important to take care not to import anything here which requires monkey
    # patching. eventlet processes environment variables at import-time,
    # so any eventlet configuration should happen here if needed.
    import eventlet
    eventlet.monkey_patch()


def patch():
    # This is only for debugging; it should not be used in production.
    if (os.environ.get('OS_WATCHER_DISABLE_EVENTLET_PATCHING', '').lower()
            not in ('1', 'true', 'yes', 'y')):
        _monkey_patch()
        global MONKEY_PATCHED
        MONKEY_PATCHED = True
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/hacking/0000775000175000017500000000000000000000000017716 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/hacking/__init__.py0000664000175000017500000000000000000000000022015 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0
python_watcher-14.0.0/watcher/hacking/checks.py0000664000175000017500000002421700000000000021536 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re


def flake8ext(f):
    """Decorator to indicate a flake8 extension.

    This is borrowed from hacking.core.flake8ext(), but for now it is used
    only by the unit tests to know which are the watcher flake8 extensions.
    """
    f.name = __name__
    f.version = '0.0.1'
    f.skip_on_py3 = False
    return f


# Guidelines for writing new hacking checks
#
#  - Use only for Watcher specific tests. OpenStack general tests
#    should be submitted to the common 'hacking' module.
#  - Pick numbers in the range N3xx. Find the current test with
#    the highest allocated number and then pick the next value.
#  - Keep the test method code in the source file ordered based
#    on the N3xx value.
#  - List the new rule in the top level HACKING.rst file

_all_log_levels = {
    'reserved': '_',  # this should never be used with a log unless
                      # it is a variable used for a log message and
                      # an exception
    'error': '_LE',
    'info': '_LI',
    'warning': '_LW',
    'critical': '_LC',
    'exception': '_LE',
}
_all_hints = set(_all_log_levels.values())


def _regex_for_level(level, hint):
    return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
        'level': level,
        'wrong_hints': '|'.join(_all_hints - set([hint])),
    }


log_warn = re.compile(
    r"(.)*LOG\.(warn)\(\s*('|\"|_)")
unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b")
unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b")
re_redundant_import_alias = re.compile(r".*import (.+) as \1$")


@flake8ext
def use_jsonutils(logical_line, filename):
    msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"

    # Skip list is currently empty.
    json_check_skipped_patterns = []

    for pattern in json_check_skipped_patterns:
        if pattern in filename:
            return

    if "json." in logical_line:
        json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
        for f in json_funcs:
            pos = logical_line.find('json.%s' % f)
            if pos != -1:
                yield (pos, msg % {'fun': f[:-1]})


@flake8ext
def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.

    * This check assumes that 'LOG' is a logger.

    N319
    """
    for hint in _all_hints:
        if logical_line.startswith("LOG.debug(%s(" % hint):
            yield (0, "N319 Don't translate debug level logs")


@flake8ext
def check_assert_called_once_with(logical_line, filename):
    # Try to detect unintended calls of nonexistent mock methods like:
    #    assert_called_once
    #    assertCalledOnceWith
    #    assert_has_called
    #    called_once_with
    if 'watcher/tests/' in filename:
        if '.assert_called_once_with(' in logical_line:
            return

        uncased_line = logical_line.lower().replace('_', '')
        check_calls = ['.assertcalledonce', '.calledoncewith']
        if any(x for x in check_calls if x in uncased_line):
            msg = ("N322: Possible use of no-op mock method. "
                   "please use assert_called_once_with.")
            yield (0, msg)

        if '.asserthascalled' in uncased_line:
            msg = ("N322: Possible use of no-op mock method. "
                   "please use assert_has_calls.")
            yield (0, msg)


@flake8ext
def check_python3_xrange(logical_line):
    if re.search(r"\bxrange\s*\(", logical_line):
        yield (0, "N325: Do not use xrange.
Use range for large loops.") @flake8ext def check_no_basestring(logical_line): if re.search(r"\bbasestring\b", logical_line): msg = ("N326: basestring is not Python3-compatible, use str instead.") yield (0, msg) @flake8ext def check_python3_no_iteritems(logical_line): if re.search(r".*\.iteritems\(\)", logical_line): msg = ("N327: Use dict.items() instead of dict.iteritems().") yield (0, msg) @flake8ext def check_asserttrue(logical_line, filename): if 'watcher/tests/' in filename: if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) @flake8ext def check_assertfalse(logical_line, filename): if 'watcher/tests/' in filename: if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line): msg = ("N329: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): msg = ("N329: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) @flake8ext def check_assertempty(logical_line, filename): if 'watcher/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). *empty* contains " "{}, [], (), set(), '', \"\"") empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties if re.search(reg, logical_line): yield (0, msg) @flake8ext def check_assertisinstance(logical_line, filename): if 'watcher/tests/' in filename: if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", logical_line): msg = ("N331: Use assertIsInstance(observed, type) instead " "of assertTrue(isinstance(observed, type))") yield (0, msg) @flake8ext def check_assertequal_for_httpcode(logical_line, filename): msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " "instead of assertEqual(observed_http_code, expected_http_code)") if 'watcher/tests/' in filename: if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", logical_line): yield (0, msg) @flake8ext def check_log_warn_deprecated(logical_line, filename): msg = "N333: Use LOG.warning due to compatibility with py3" if log_warn.match(logical_line): yield (0, msg) @flake8ext def check_oslo_i18n_wrapper(logical_line, filename, noqa): """Check for watcher.i18n usage. N340(watcher/foo/bar.py): from watcher.i18n import _ Okay(watcher/foo/bar.py): from watcher.i18n import _ # noqa """ if noqa: return split_line = logical_line.split() modulename = os.path.normpath(filename).split('/')[0] bad_i18n_module = '%s.i18n' % modulename if (len(split_line) > 1 and split_line[0] in ('import', 'from')): if (split_line[1] == bad_i18n_module or modulename != 'watcher' and split_line[1] in ('watcher.i18n', 'watcher._i18n')): msg = ("N340: %(found)s is found. Use %(module)s._i18n instead." % {'found': split_line[1], 'module': modulename}) yield (0, msg) @flake8ext def check_builtins_gettext(logical_line, tokens, filename, lines, noqa): """Check usage of builtins gettext _(). 
N341(watcher/foo.py): _('foo') Okay(watcher/i18n.py): _('foo') Okay(watcher/_i18n.py): _('foo') Okay(watcher/foo.py): _('foo') # noqa """ if noqa: return modulename = os.path.normpath(filename).split('/')[0] if '%s/tests' % modulename in filename: return if os.path.basename(filename) in ('i18n.py', '_i18n.py'): return token_values = [t[1] for t in tokens] i18n_wrapper = '%s._i18n' % modulename if '_' in token_values: i18n_import_line_found = False for line in lines: split_line = [elm.rstrip(',') for elm in line.split()] if (len(split_line) > 1 and split_line[0] == 'from' and split_line[1] == i18n_wrapper and '_' in split_line): i18n_import_line_found = True break if not i18n_import_line_found: msg = ("N341: _ from python builtins module is used. " "Use _ from %s instead." % i18n_wrapper) yield (0, msg) @flake8ext def no_redundant_import_alias(logical_line): """Checking no redundant import alias. https://bugs.launchpad.net/watcher/+bug/1745527 N342 """ if re.match(re_redundant_import_alias, logical_line): yield (0, "N342: No redundant import alias.") @flake8ext def import_stock_mock(logical_line): """Use python's mock, not the mock library. Since we `dropped support for python 2`__, we no longer need to use the mock library, which existed to backport py3 functionality into py2. Which must be done by saying:: from unittest import mock ...because if you say:: import mock ...you definitely will not be getting the standard library mock. That will always import the third party mock library. This check can be removed in the future (and we can start saying ``import mock`` again) if we manage to purge these transitive dependencies. .. __: https://review.opendev.org/#/c/717540 N366 """ if logical_line == 'import mock': yield (0, "N366: You must explicitly import python's mock: " "``from unittest import mock``") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6031353 python_watcher-14.0.0/watcher/locale/0000775000175000017500000000000000000000000017551 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6031353 python_watcher-14.0.0/watcher/locale/de/0000775000175000017500000000000000000000000020141 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000021726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/locale/de/LC_MESSAGES/watcher.po0000664000175000017500000006351700000000000023737 0ustar00zuulzuul00000000000000# Frank Kloeker , 2018. #zanata # Andreas Jaeger , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: watcher VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2020-04-26 02:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-04-25 11:45+0000\n" "Last-Translator: Andreas Jaeger \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid " (may include orphans)" msgstr "(kann Waisen einschließen)" msgid " (orphans excluded)" msgstr "(Waisen ausgeschlossen)" #, python-format msgid "%(client)s connection failed. 
Reason: %(reason)s" msgstr "Die Verbindung von %(client)s ist fehlgeschlagen. Grund: %(reason)s" #, python-format msgid "%(field)s can't be updated." msgstr "%(field)s kann nicht aktualisiert werden." #, python-format msgid "%(parameter)s has to be of type %(parameter_type)s" msgstr "%(parameter)s muss vom Typ %(parameter_type)s sein" #, python-format msgid "%s is not JSON serializable" msgstr "%s ist nicht JSON serialisierbar" #, python-format msgid "" "'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible " "choices: %(choices)s" msgstr "" "Die Strategie '%(strategy)s' bezieht sich auf das Ziel'%(goal)s'. Mögliche " "Auswahlmöglichkeiten: %(choices)s" #, python-format msgid "'%s' is a mandatory attribute and can not be removed" msgstr "'%s' ist ein obligatorisches Attribut und kann nicht entfernt werden" #, python-format msgid "'%s' is an internal attribute and can not be updated" msgstr "'%s' ist ein internes Attribut und kann nicht aktualisiert werden" msgid "'add' and 'replace' operations needs value" msgstr "'add' und 'replace' Operationen benötigt Wert" msgid "'obj' argument type is not valid" msgstr "Der Argumenttyp 'obj' ist nicht gültig" #, python-format msgid "'obj' argument type is not valid: %s" msgstr "Der Argumenttyp 'obj' ist nicht gültig: %s" #, python-format msgid "A goal with UUID %(uuid)s already exists" msgstr "Ein Ziel mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "A scoring engine with UUID %(uuid)s already exists" msgstr "Eine Scoring-Engine mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "A service with name %(name)s is already working on %(host)s." msgstr "Ein Dienst mit dem Namen %(name)s arbeitet bereits auf %(host)s." #, python-format msgid "A strategy with UUID %(uuid)s already exists" msgstr "Eine Strategie mit UUID %(uuid)s ist bereits vorhanden" msgid "A valid goal_id or audit_template_id must be provided" msgstr "Eine gültige goal_id oder audit_template_id muss angegeben werden" #, python-format msgid "Action %(action)s could not be found" msgstr "Aktion %(action)s konnte nicht gefunden werden" #, python-format msgid "Action %(action)s was not eagerly loaded" msgstr "Aktion %(action)s wurde nicht eifrig geladen" #, python-format msgid "Action Plan %(action_plan)s is currently running." msgstr "Der Aktionsplan %(action_plan)s wird gerade ausgeführt." 
#, python-format msgid "Action Plan %(action_plan)s is referenced by one or multiple actions" msgstr "" "Der Aktionsplan %(action_plan)s wird durch eine oder mehrere Aktionen " "referenziert" #, python-format msgid "Action Plan with UUID %(uuid)s is cancelled by user" msgstr "Der Aktionsplan mit der UUID %(uuid)s wird vom Benutzer abgebrochen" msgid "Action Plans" msgstr "Aktionspläne" #, python-format msgid "Action plan %(action_plan)s is invalid" msgstr "Der Aktionsplan %(action_plan)s ist ungültig" #, python-format msgid "Action plan %(action_plan)s is referenced by one or multiple goals" msgstr "" "Der Aktionsplan %(action_plan)s wird von einem oder mehreren Zielen " "referenziert" #, python-format msgid "Action plan %(action_plan)s was not eagerly loaded" msgstr "Der Aktionsplan %(action_plan)s wurde nicht eifrig geladen" #, python-format msgid "ActionPlan %(action_plan)s could not be found" msgstr "ActionPlan %(action_plan)s konnte nicht gefunden werden" msgid "Actions" msgstr "Aktionen" msgid "Actuator" msgstr "Betätiger" #, python-format msgid "Adding a new attribute (%s) to the root of the resource is not allowed" msgstr "" "Das Hinzufügen eines neuen Attributs (%s) zum Stamm der Ressource ist nicht " "zulässig" msgid "Airflow Optimization" msgstr "Luftstrom-Optimierung" #, python-format msgid "An action description with type %(action_type)s is already exist." msgstr "" "Eine Aktionsbeschreibung vom Typ %(action_type)s ist bereits vorhanden." #, python-format msgid "An action plan with UUID %(uuid)s already exists" msgstr "Ein Aktionsplan mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "An action with UUID %(uuid)s already exists" msgstr "Eine Aktion mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "An audit with UUID or name %(audit)s already exists" msgstr "Ein Audit mit UUID oder Name %(audit)s ist bereits vorhanden" #, python-format msgid "An audit_template with UUID or name %(audit_template)s already exists" msgstr "" "Ein Audit_Template mit UUID oder Name %(audit_template)s ist bereits " "vorhanden" msgid "An indicator value should be a number" msgstr "Ein Indikatorwert sollte eine Zahl sein" msgid "An unknown exception occurred" msgstr "Eine unbekannte Ausnahme ist aufgetreten" msgid "At least one feature is required" msgstr "Mindestens eine Funktion ist erforderlich" #, python-format msgid "Audit %(audit)s could not be found" msgstr "Audit %(audit)s konnte nicht gefunden werden" #, python-format msgid "Audit %(audit)s is invalid" msgstr "Audit %(audit)s ist ungültig" #, python-format msgid "Audit %(audit)s is referenced by one or multiple action plans" msgstr "" "Audit %(audit)s wird von einem oder mehreren Aktionsplänen referenziert" #, python-format msgid "Audit %(audit)s was not eagerly loaded" msgstr "Audit %(audit)s wurde nicht eifrig geladen" msgid "Audit Templates" msgstr "Prüfungsvorlagen" #, python-format msgid "Audit parameter %(parameter)s are not allowed" msgstr "Prüfparameter %(parameter)s sind nicht erlaubt" #, python-format msgid "Audit state %(state)s is disallowed." msgstr "Auditstatus %(state)s ist ungültig." #, python-format msgid "Audit type %(audit_type)s could not be found" msgstr "Audit-Typ %(audit_type)s konnte nicht gefunden werden" #, python-format msgid "Audit type %(audit_type)s is disallowed." msgstr "Audit-Typ %(audit_type)s nicht erlaubt." 
#, python-format msgid "AuditTemplate %(audit_template)s could not be found" msgstr "AuditTemplate %(audit_template)s konnte nicht gefunden werden" msgid "Audits" msgstr "Audits" msgid "Basic offline consolidation" msgstr "Grundlegende Offline-Konsolidierung" msgid "CDMCs" msgstr "CDMCs" msgid "Cannot compile public API routes" msgstr "Öffentliche API-Routen können nicht kompiliert werden" msgid "Cannot create an action directly" msgstr "Eine Aktion kann nicht direkt erstellt werden" msgid "Cannot delete an action directly" msgstr "Eine Aktion kann nicht direkt gelöscht werden" msgid "Cannot modify an action directly" msgstr "Eine Aktion kann nicht direkt geändert werden" msgid "Cannot overwrite UUID for an existing Action Plan." msgstr "" "UUID für einen vorhandenen Aktionsplan kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Action." msgstr "UUID kann für eine vorhandene Aktion nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Audit Template." msgstr "UUID für eine vorhandene Auditvorlage kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Audit." msgstr "UUID für ein vorhandenes Audit kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Goal." msgstr "UUID für ein vorhandenes Ziel kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Scoring Engine." msgstr "" "UUID für eine vorhandene Scoring Engine kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Strategy." msgstr "UUID kann für eine vorhandene Strategie nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing efficacy indicator." msgstr "" "UUID kann für einen vorhandenen Wirksamkeitsindikator nicht überschrieben " "werden." msgid "Cannot remove 'goal' attribute from an audit template" msgstr "Das Attribut 'goal' kann nicht aus einer Audit-Vorlage entfernt werden" msgid "Conflict" msgstr "Konflikt" #, python-format msgid "" "Could not compute the global efficacy for the '%(goal)s' goal using the " "'%(strategy)s' strategy." msgstr "" "Die globale Wirksamkeit für das Ziel '%(goal)s' konnte nicht mit der " "Strategie '%(strategy)s' berechnet werden." #, python-format msgid "Could not load any strategy for goal %(goal)s" msgstr "Konnte keine Strategie für Ziel %(goal)s laden" #, python-format msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s" msgstr "Patch '%(patch)s' konnte nicht angewendet werden. Grund:%(reason)s" #, python-format msgid "Couldn't delete when state is '%(state)s'." msgstr "Konnte nicht gelöscht werden, wenn der Status '%(state)s' ist." #, python-format msgid "Couldn't start when state is '%(state)s'." msgstr "Konnte nicht gestartet werden, wenn der Status '%(state)s' ist." #, python-format msgid "Datasource %(datasource)s is not available." msgstr "Datenquelle %(datasource)s ist nicht verfügbar." #, python-format msgid "Datasource %(datasource)s is not supported by strategy %(strategy)s" msgstr "" "Die Datenquelle %(datasource)s wird von der Strategie %(strategy)s nicht " "unterstützt" msgid "Do you want to delete objects up to the specified maximum number? [y/N]" msgstr "" "Möchten Sie Objekte bis zur angegebenen maximalen Anzahl löschen? 
[J/N]" #, python-format msgid "Domain name seems ambiguous: %s" msgstr "Domänenname scheint mehrdeutig: %s" #, python-format msgid "Domain not Found: %s" msgstr "Domain nicht gefunden: %s" msgid "Dummy Strategy using sample Scoring Engines" msgstr "Dummy-Strategie mit Sample Scoring Engines" msgid "Dummy goal" msgstr "Dummy Ziel" msgid "Dummy strategy" msgstr "Dummy-Strategie" msgid "Dummy strategy with resize" msgstr "Dummy-Strategie mit Größenänderung" #, python-format msgid "Efficacy indicator %(efficacy_indicator)s could not be found" msgstr "" "Der Wirksamkeitsindikator %(efficacy_indicator)s konnte nicht gefunden werden" #, python-format msgid "Error loading plugin '%(name)s'" msgstr "Fehler beim Laden des Plugins '%(name)s'" #, python-format msgid "ErrorDocumentMiddleware received an invalid status %s" msgstr "ErrorDocumentMiddleware hat einen ungültigen Status %s erhalten" #, python-format msgid "Expected a logical name but received %(name)s" msgstr "Erwartete einen logischen Namen, erhielt aber %(name)s" #, python-format msgid "Expected a logical name or uuid but received %(name)s" msgstr "" "Erwartete einen logischen Namen oder eine UUID, erhielt jedoch %(name)s" #, python-format msgid "Expected a uuid but received %(uuid)s" msgstr "Erwartet eine Uuid aber %(uuid)s erhalten" #, python-format msgid "Expected a uuid or int but received %(identity)s" msgstr "Erwartet eine Uuid oder Int aber %(identity)s erhalten" #, python-format msgid "Expected an interval or cron syntax but received %(name)s" msgstr "Erwartete eine Intervall- oder Cron-Syntax, aber erhielt %(name)s" #, python-format msgid "Failed to create volume '%(volume)s. " msgstr "Fehler beim Erstellen des Datenträgers '%(volume)s." #, python-format msgid "Failed to delete volume '%(volume)s. " msgstr "Fehler beim Löschen des Datenträgers '%(volume)s." #, python-format msgid "Filter operator is not valid: %(operator)s not in %(valid_operators)s" msgstr "" "Filter Operator ist nicht gültig: %(operator)s nicht in %(valid_operators)s" msgid "Filtering actions on both audit and action-plan is prohibited" msgstr "" "Das Filtern von Aktionen sowohl im Audit- als auch im Aktionsplan ist " "verboten" msgid "Goal" msgstr "Ziel" #, python-format msgid "Goal %(goal)s could not be found" msgstr "Ziel %(goal)s konnte nicht gefunden werden" #, python-format msgid "Goal %(goal)s is invalid" msgstr "Ziel %(goal)s ist ungültig" msgid "Goals" msgstr "Ziele" msgid "Hardware Maintenance" msgstr "Hardware-Wartung" #, python-format msgid "Here below is a table containing the objects that can be purged%s:" msgstr "" "Hier unten ist eine Tabelle mit den Objekten, die gelöscht werden können: %s" msgid "Illegal argument" msgstr "Illegales Argument" #, python-format msgid "" "Incorrect mapping: could not find associated weight for %s in weight dict." msgstr "" "Inkorrektes Mapping: Die zugehörige Gewichtung für %s im Gewicht dict konnte " "nicht gefunden werden." #, python-format msgid "Interval of audit must be specified for %(audit_type)s." msgstr "Das Intervall der Prüfung muss für %(audit_type)s angegeben werden." #, python-format msgid "Interval of audit must not be set for %(audit_type)s." msgstr "" "Das Intervall der Prüfung darf nicht für %(audit_type)s festgelegt werden." 
#, python-format msgid "Invalid filter: %s" msgstr "Ungültiger Filter: %s" msgid "Invalid number of features, expected 9" msgstr "Ungültige Anzahl der erwarteten Features 9" #, python-format msgid "Invalid query: %(start_time)s > %(end_time)s" msgstr "Ungültige Abfrage: %(start_time)s > %(end_time)s" #, python-format msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'" msgstr "Ungültige Sortierrichtung: %s. Akzeptable Werte sind 'asc' oder 'desc'" #, python-format msgid "Invalid sort key: %s" msgstr "Ungültiger Sortierschlüssel: %s" msgid "Invalid state for swapping volume" msgstr "Ungültiger Status für das Auslagern des Datenträgers" #, python-format msgid "Invalid state: %(state)s" msgstr "Ungültiger Status: %(state)s" msgid "JSON list expected in feature argument" msgstr "JSON-Liste in Feature-Argument erwartet" msgid "Limit must be positive" msgstr "Limit muss positiv sein" msgid "Limit should be positive" msgstr "Limit sollte positiv sein" msgid "Maximum time since last check-in for up service." msgstr "Maximale Zeit seit dem letzten Check-in für den Up-Service." #, python-format msgid "Metric: %(metric)s not available" msgstr "Metrik: %(metric)s nicht verfügbar" #, python-format msgid "Migration of type '%(migration_type)s' is not supported." msgstr "Die Migration vom Typ '%(migration_type)s' wird nicht unterstützt." msgid "Minimum Nova API Version" msgstr "Minimale Nova API Version" #, python-format msgid "No %(metric)s metric for %(host)s found." msgstr "Keine %(metric)s Metrik für %(host)s gefunden." #, python-format msgid "No strategy could be found to achieve the '%(goal)s' goal." msgstr "" "Es konnte keine Strategie gefunden werden, um das Ziel '%(goal)s' zu " "erreichen." msgid "Noisy Neighbor" msgstr "Lauter Nachbar" msgid "Not authorized" msgstr "Nicht berechtigt" msgid "Not supported" msgstr "Nicht unterstützt" msgid "Operation not permitted" msgstr "Operation unzulässig" msgid "Outlet temperature based strategy" msgstr "Auslasstemperatur basierte Strategie" #, python-format msgid "" "Payload not populated when trying to send notification \"%(class_name)s\"" msgstr "" "Payload wurde nicht ausgefüllt, wenn versucht wird, eine Benachrichtigung " "'%(class_name)s' zu senden" msgid "Plugins" msgstr "Plugins" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Die Richtlinie lässt nicht zu, dass %(action)s ausgeführt werden." #, python-format msgid "Project name seems ambiguous: %s" msgstr "Der Projektname erscheint mehrdeutig: %s" #, python-format msgid "Project not Found: %s" msgstr "Projekt nicht gefunden: %s" #, python-format msgid "Provided cron is invalid: %(message)s" msgstr "Bereitgestellter Cron ist ungültig: %(message)s" #, python-format msgid "Purge results summary%s:" msgstr "Zusammenfassung der Bereinigungsergebnisse %s:" msgid "" "Ratio of actual attached volumes migrated to planned attached volumes " "migrate." msgstr "" "Das Verhältnis der tatsächlich angehängten Datenträger, die zu geplanten " "angehängten Datenträger migriert wurden, wird migriert." msgid "" "Ratio of actual cold migrated instances to planned cold migrate instances." msgstr "" "Verhältnis von tatsächlichen kalt migrierten Instanzen zu geplanten kalten " "migrieren Instanzen." msgid "" "Ratio of actual detached volumes migrated to planned detached volumes " "migrate." msgstr "" "Das Verhältnis der tatsächlich abgetrennten Datenträger, die in geplante, " "getrennte Datenträger migriert wurden, wird migriert." 
msgid "" "Ratio of actual live migrated instances to planned live migrate instances." msgstr "" "Verhältnis von tatsächlichen migrierten Live-Instanzen zu geplanten Live-" "Migrationsinstanzen" msgid "" "Ratio of released compute nodes divided by the total number of enabled " "compute nodes." msgstr "" "Verhältnis der freigegebenen Compute-Knoten geteilt durch die Gesamtzahl der " "aktivierten Compute-Knoten." msgid "Request not acceptable." msgstr "Anforderung nicht zulässig." #, python-format msgid "Role name seems ambiguous: %s" msgstr "Der Rollenname scheint mehrdeutig: %s" #, python-format msgid "Role not Found: %s" msgstr "Rolle nicht gefunden: %s" msgid "Saving Energy" msgstr "Energie sparen" msgid "Saving Energy Strategy" msgstr "Energiestrategie speichern" #, python-format msgid "Scoring Engine with name=%s not found" msgstr "Scoring Engine mit name=%s nicht gefunden" #, python-format msgid "ScoringEngine %(scoring_engine)s could not be found" msgstr "ScoringEngine %(scoring_engine)s konnte nicht gefunden werden" msgid "Seconds between running periodic tasks." msgstr "Sekunden zwischen dem Ausführen periodischer Aufgaben." msgid "Server Consolidation" msgstr "Serverkonsolidierung" msgid "" "Specifies the minimum level for which to send notifications. If not set, no " "notifications will be sent. The default is for this option to be at the " "`INFO` level." msgstr "" "Gibt die Mindeststufe an, für die Benachrichtigungen gesendet werden. Wenn " "nicht festgelegt, werden keine Benachrichtigungen gesendet. Standardmäßig " "ist diese Option auf der INFO-Ebene." msgid "" "Specify parameters but no predefined strategy for audit, or no parameter " "spec in predefined strategy" msgstr "" "Geben Sie Parameter, aber keine vordefinierte Strategie für das Audit oder " "keine Parameterspezifikation in der vordefinierten Strategie an" #, python-format msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)" msgstr "Statusübergang nicht erlaubt: (%(initial_state)s -> %(new_state)s)" msgid "Storage Capacity Balance Strategy" msgstr "Storage Capacity Balance-Strategie" msgid "Strategies" msgstr "Strategien" #, python-format msgid "Strategy %(strategy)s could not be found" msgstr "Strategie %(strategy)s konnte nicht gefunden werden" #, python-format msgid "Strategy %(strategy)s is invalid" msgstr "Strategie %(strategy)s ist ungültig" #, python-format msgid "The %(name)s %(id)s could not be found" msgstr " Die %(name)s %(id)s konnte nicht gefunden werden" #, python-format msgid "The %(name)s resource %(id)s could not be found" msgstr "Die %(name)s Ressource %(id)s konnte nicht gefunden werden" #, python-format msgid "The %(name)s resource %(id)s is not soft deleted" msgstr "Die %(name)s Ressource %(id)s wird nicht weich gelöscht" #, python-format msgid "The action %(action_id)s execution failed." msgstr "Die Ausführung der Aktion %(action_id)s ist fehlgeschlagen." #, python-format msgid "The action description %(action_id)s cannot be found." msgstr "Die Aktionsbeschreibung %(action_id)s konnte nicht gefunden werden." 
msgid "The audit template UUID or name specified is invalid" msgstr "Die UUID oder der Name der Überprüfungsvorlage ist ungültig" #, python-format msgid "The baremetal resource '%(name)s' could not be found" msgstr "Die Barmetal-Ressource '%(name)s' konnte nicht gefunden werden" #, python-format msgid "The cluster data model '%(cdm)s' could not be built" msgstr "Das Clusterdatenmodell '%(cdm)s' konnte nicht erstellt werden" msgid "The cluster state is not defined" msgstr "Der Clusterstatus ist nicht definiert" msgid "The cluster state is stale" msgstr "Der Clusterstatus ist veraltet" #, python-format msgid "The compute node %(name)s could not be found" msgstr "Der Compute-Knoten %(name)s konnte nicht gefunden werden" #, python-format msgid "The compute resource '%(name)s' could not be found" msgstr "Die Rechenressource '%(name)s' konnte nicht gefunden werden" #, python-format msgid "The identifier '%(name)s' is a reserved word" msgstr "Der Bezeichner '%(name)s' ist ein reserviertes Wort" #, python-format msgid "" "The indicator '%(name)s' with value '%(value)s' and spec type " "'%(spec_type)s' is invalid." msgstr "" "Das Kennzeichen '%(name)s' mit dem Wert '%(value)s' und dem " "Spezifikationstyp '%(spec_type)s' ist ungültig." #, python-format msgid "The instance '%(name)s' could not be found" msgstr "Die Instanz '%(name)s' konnte nicht gefunden werden" #, python-format msgid "The ironic node %(uuid)s could not be found" msgstr "Der Ironic Knoten %(uuid)s konnte nicht gefunden werden" msgid "The number of VM migrations to be performed." msgstr "Die Anzahl der VM-Migrationen, die ausgeführt werden sollen." msgid "The number of attached volumes actually migrated." msgstr "Die Anzahl der angehängten Datenträger wurde tatsächlich migriert." msgid "The number of attached volumes planned to migrate." msgstr "Die Anzahl der angehängten Datenträger, die migriert werden sollen." msgid "The number of compute nodes to be released." msgstr "Die Anzahl der zu veröffentlichenden Compute-Knoten." msgid "The number of detached volumes actually migrated." msgstr "Die Anzahl der gelösten Datenträger wurde tatsächlich migriert." msgid "The number of detached volumes planned to migrate." msgstr "Die Anzahl der gelöschten Datenträger, die migriert werden sollen." msgid "The number of instances actually cold migrated." msgstr "Die Anzahl der tatsächlich kalten Instanzen wurde migriert." msgid "The number of instances actually live migrated." msgstr "Die Anzahl der tatsächlich migrierten Instanzen." msgid "The number of instances planned to cold migrate." msgstr "Die Anzahl der geplanten Fälle für eine Kaltmigration." msgid "The number of instances planned to live migrate." msgstr "Die Anzahl der geplanten Live-Migrationen." #, python-format msgid "" "The number of objects (%(num)s) to delete from the database exceeds the " "maximum number of objects (%(max_number)s) specified." msgstr "" "Die Anzahl der zu löschenden Objekte (%(num)s) aus der Datenbank " "überschreitet die maximale Anzahl der angegebenen Objekte (%(max_number)s)." #, python-format msgid "The pool %(name)s could not be found" msgstr "Der Pool %(name)skonnte nicht gefunden werden" #, python-format msgid "The service %(service)s cannot be found." msgstr "Der Service %(service)s kann nicht gefunden werden." 
#, python-format msgid "The storage node %(name)s could not be found" msgstr "Der Speicherknoten %(name)s konnte nicht gefunden werden" #, python-format msgid "The storage resource '%(name)s' could not be found" msgstr "Die Speicherressource '%(name)s' konnte nicht gefunden werden" msgid "The target state is not defined" msgstr "Der Zielzustand ist nicht definiert" msgid "The total number of enabled compute nodes." msgstr "Die Gesamtzahl der aktivierten Compute-Knoten." #, python-format msgid "The volume '%(name)s' could not be found" msgstr "Der Datenträger '%(name)s' konnte nicht gefunden werden" #, python-format msgid "There are %(count)d objects set for deletion. Continue? [y/N]" msgstr "Es sind %(count)d Objekte zum Löschen eingestellt. Fortsetzen? [J/N]" msgid "Thermal Optimization" msgstr "Thermische Optimierung" msgid "Total" msgstr "Gesamt" msgid "Unable to parse features: " msgstr "Die Analyse von Features ist nicht möglich:" #, python-format msgid "Unable to parse features: %s" msgstr "Die Funktionen können nicht analysiert werden: %s" msgid "Unacceptable parameters" msgstr "Inakzeptable Parameter" msgid "Unclassified" msgstr "Nicht klassifiziert" #, python-format msgid "Unexpected keystone client error occurred: %s" msgstr "Unerwarteter Keystone Fehler trat auf: %s" msgid "Uniform airflow migration strategy" msgstr "Einheitliche Luftstrommigrationsstrategie" #, python-format msgid "User name seems ambiguous: %s" msgstr "Der Benutzername scheint mehrdeutig zu sein: %s" #, python-format msgid "User not Found: %s" msgstr "Benutzer nicht gefunden: %s" msgid "VM Workload Consolidation Strategy" msgstr "VM-Workload-Konsolidierungsstrategie" msgid "Volume type must be different for retyping" msgstr "Der Volume-Typ muss sich beim erneuten Eintippen unterscheiden" msgid "Volume type must be same for migrating" msgstr "Der Volume-Typ muss für die Migration identisch sein" msgid "" "Watcher database schema is already under version control; use upgrade() " "instead" msgstr "" "Watcher-Datenbankschema ist bereits unter Versionskontrolle; Verwenden Sie " "stattdessen upgrade()" #, python-format msgid "Workflow execution error: %(error)s" msgstr "Workflow-Ausführungsfehler: %(error)s" msgid "Workload Balance Migration Strategy" msgstr "Workload-Balance-Migrationsstrategie" msgid "Workload Balancing" msgstr "Workload-Ausgleich" msgid "Workload stabilization" msgstr "Workload-Stabilisierung" #, python-format msgid "Wrong type. Expected '%(type)s', got '%(value)s'" msgstr "Falscher Typ. Erwartete '%(type)s', bekam '%(value)s'" #, python-format msgid "" "You shouldn't use any other IDs of %(resource)s if you use wildcard " "character." msgstr "" "Sie sollten keine anderen IDs von %(resource)s verwenden, wenn Sie " "Platzhalterzeichen verwenden." 
msgid "Zone migration" msgstr "Zonenmigration" msgid "destination type is required when migration type is swap" msgstr "Zieltyp ist erforderlich, wenn der Migrationstyp Swap ist" msgid "host_aggregates can't be included and excluded together" msgstr "" "host_aggregates können nicht zusammen eingeschlossen und ausgeschlossen " "werden" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6031353 python_watcher-14.0.0/watcher/locale/en_GB/0000775000175000017500000000000000000000000020523 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000022310 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/locale/en_GB/LC_MESSAGES/watcher.po0000664000175000017500000006447700000000000024327 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata msgid "" msgstr "" "Project-Id-Version: watcher VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-08-29 03:03+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-05-31 08:38+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid " (may include orphans)" msgstr " (may include orphans)" msgid " (orphans excluded)" msgstr " (orphans excluded)" #, python-format msgid "%(client)s connection failed. Reason: %(reason)s" msgstr "%(client)s connection failed. Reason: %(reason)s" #, python-format msgid "%(field)s can't be updated." msgstr "%(field)s can't be updated." #, python-format msgid "%(parameter)s has to be of type %(parameter_type)s" msgstr "%(parameter)s has to be of type %(parameter_type)s" #, python-format msgid "%s is not JSON serializable" msgstr "%s is not JSON serialisable" #, python-format msgid "" "'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible " "choices: %(choices)s" msgstr "" "'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible " "choices: %(choices)s" #, python-format msgid "'%s' is a mandatory attribute and can not be removed" msgstr "'%s' is a mandatory attribute and can not be removed" #, python-format msgid "'%s' is an internal attribute and can not be updated" msgstr "'%s' is an internal attribute and can not be updated" msgid "'add' and 'replace' operations needs value" msgstr "'add' and 'replace' operations needs value" msgid "'obj' argument type is not valid" msgstr "'obj' argument type is not valid" #, python-format msgid "'obj' argument type is not valid: %s" msgstr "'obj' argument type is not valid: %s" #, python-format msgid "A goal with UUID %(uuid)s already exists" msgstr "A goal with UUID %(uuid)s already exists" #, python-format msgid "A scoring engine with UUID %(uuid)s already exists" msgstr "A scoring engine with UUID %(uuid)s already exists" #, python-format msgid "A service with name %(name)s is already working on %(host)s." msgstr "A service with name %(name)s is already working on %(host)s." 
#, python-format msgid "A strategy with UUID %(uuid)s already exists" msgstr "A strategy with UUID %(uuid)s already exists" msgid "A valid goal_id or audit_template_id must be provided" msgstr "A valid goal_id or audit_template_id must be provided" #, python-format msgid "Action %(action)s could not be found" msgstr "Action %(action)s could not be found" #, python-format msgid "Action %(action)s was not eagerly loaded" msgstr "Action %(action)s was not eagerly loaded" #, python-format msgid "Action Plan %(action_plan)s is currently running." msgstr "Action Plan %(action_plan)s is currently running." #, python-format msgid "Action Plan %(action_plan)s is referenced by one or multiple actions" msgstr "Action Plan %(action_plan)s is referenced by one or multiple actions" #, python-format msgid "Action Plan with UUID %(uuid)s is cancelled by user" msgstr "Action Plan with UUID %(uuid)s is cancelled by user" msgid "Action Plans" msgstr "Action Plans" #, python-format msgid "Action plan %(action_plan)s is invalid" msgstr "Action plan %(action_plan)s is invalid" #, python-format msgid "Action plan %(action_plan)s is referenced by one or multiple goals" msgstr "Action plan %(action_plan)s is referenced by one or multiple goals" #, python-format msgid "Action plan %(action_plan)s was not eagerly loaded" msgstr "Action plan %(action_plan)s was not eagerly loaded" #, python-format msgid "ActionPlan %(action_plan)s could not be found" msgstr "ActionPlan %(action_plan)s could not be found" msgid "Actions" msgstr "Actions" msgid "Actuator" msgstr "Actuator" #, python-format msgid "Adding a new attribute (%s) to the root of the resource is not allowed" msgstr "" "Adding a new attribute (%s) to the root of the resource is not allowed" msgid "Airflow Optimization" msgstr "Airflow Optimisation" #, python-format msgid "An action description with type %(action_type)s is already exist." msgstr "An action description with type %(action_type)s is already exist." #, python-format msgid "An action plan with UUID %(uuid)s already exists" msgstr "An action plan with UUID %(uuid)s already exists" #, python-format msgid "An action with UUID %(uuid)s already exists" msgstr "An action with UUID %(uuid)s already exists" #, python-format msgid "An audit with UUID or name %(audit)s already exists" msgstr "An audit with UUID or name %(audit)s already exists" #, python-format msgid "An audit_template with UUID or name %(audit_template)s already exists" msgstr "An audit_template with UUID or name %(audit_template)s already exists" msgid "An indicator value should be a number" msgstr "An indicator value should be a number" msgid "An unknown exception occurred" msgstr "An unknown exception occurred" msgid "At least one feature is required" msgstr "At least one feature is required" #, python-format msgid "Audit %(audit)s could not be found" msgstr "Audit %(audit)s could not be found" #, python-format msgid "Audit %(audit)s is invalid" msgstr "Audit %(audit)s is invalid" #, python-format msgid "Audit %(audit)s is referenced by one or multiple action plans" msgstr "Audit %(audit)s is referenced by one or multiple action plans" #, python-format msgid "Audit %(audit)s was not eagerly loaded" msgstr "Audit %(audit)s was not eagerly loaded" msgid "Audit Templates" msgstr "Audit Templates" #, python-format msgid "Audit parameter %(parameter)s are not allowed" msgstr "Audit parameter %(parameter)s are not allowed" #, python-format msgid "Audit state %(state)s is disallowed." msgstr "Audit state %(state)s is disallowed." 
#, python-format msgid "Audit type %(audit_type)s could not be found" msgstr "Audit type %(audit_type)s could not be found" #, python-format msgid "Audit type %(audit_type)s is disallowed." msgstr "Audit type %(audit_type)s is disallowed." #, python-format msgid "AuditTemplate %(audit_template)s could not be found" msgstr "AuditTemplate %(audit_template)s could not be found" msgid "Audits" msgstr "Audits" msgid "Basic offline consolidation" msgstr "Basic offline consolidation" msgid "CDMCs" msgstr "CDMCs" msgid "Cannot compile public API routes" msgstr "Cannot compile public API routes" msgid "Cannot create an action directly" msgstr "Cannot create an action directly" msgid "Cannot delete an action directly" msgstr "Cannot delete an action directly" msgid "Cannot modify an action directly" msgstr "Cannot modify an action directly" msgid "Cannot overwrite UUID for an existing Action Plan." msgstr "Cannot overwrite UUID for an existing Action Plan." msgid "Cannot overwrite UUID for an existing Action." msgstr "Cannot overwrite UUID for an existing Action." msgid "Cannot overwrite UUID for an existing Audit Template." msgstr "Cannot overwrite UUID for an existing Audit Template." msgid "Cannot overwrite UUID for an existing Audit." msgstr "Cannot overwrite UUID for an existing Audit." msgid "Cannot overwrite UUID for an existing Goal." msgstr "Cannot overwrite UUID for an existing Goal." msgid "Cannot overwrite UUID for an existing Scoring Engine." msgstr "Cannot overwrite UUID for an existing Scoring Engine." msgid "Cannot overwrite UUID for an existing Strategy." msgstr "Cannot overwrite UUID for an existing Strategy." msgid "Cannot overwrite UUID for an existing efficacy indicator." msgstr "Cannot overwrite UUID for an existing efficacy indicator." msgid "Cannot remove 'goal' attribute from an audit template" msgstr "Cannot remove 'goal' attribute from an audit template" msgid "Ceilometer helper does not support statistic series method" msgstr "Ceilometer helper does not support statistic series method" msgid "Cluster Maintaining" msgstr "Cluster Maintaining" msgid "Conflict" msgstr "Conflict" #, python-format msgid "" "Could not compute the global efficacy for the '%(goal)s' goal using the " "'%(strategy)s' strategy." msgstr "" "Could not compute the global efficacy for the '%(goal)s' goal using the " "'%(strategy)s' strategy." #, python-format msgid "Could not load any strategy for goal %(goal)s" msgstr "Could not load any strategy for goal %(goal)s" #, python-format msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s" msgstr "Couldn't apply patch '%(patch)s'. Reason: %(reason)s" #, python-format msgid "Couldn't delete when state is '%(state)s'." msgstr "Couldn't delete when state is '%(state)s'." #, python-format msgid "Couldn't start when state is '%(state)s'." msgstr "Couldn't start when state is '%(state)s'." #, python-format msgid "Datasource %(datasource)s is not available." msgstr "Datasource %(datasource)s is not available." #, python-format msgid "Datasource %(datasource)s is not supported by strategy %(strategy)s" msgstr "Datasource %(datasource)s is not supported by strategy %(strategy)s" msgid "Do you want to delete objects up to the specified maximum number? [y/N]" msgstr "" "Do you want to delete objects up to the specified maximum number? 
[y/N]" #, python-format msgid "Domain name seems ambiguous: %s" msgstr "Domain name seems ambiguous: %s" #, python-format msgid "Domain not Found: %s" msgstr "Domain not Found: %s" msgid "Dummy Strategy using sample Scoring Engines" msgstr "Dummy Strategy using sample Scoring Engines" msgid "Dummy goal" msgstr "Dummy goal" msgid "Dummy strategy" msgstr "Dummy strategy" msgid "Dummy strategy with resize" msgstr "Dummy strategy with resize" #, python-format msgid "Efficacy indicator %(efficacy_indicator)s could not be found" msgstr "Efficacy indicator %(efficacy_indicator)s could not be found" #, python-format msgid "Error loading plugin '%(name)s'" msgstr "Error loading plugin '%(name)s'" #, python-format msgid "ErrorDocumentMiddleware received an invalid status %s" msgstr "ErrorDocumentMiddleware received an invalid status %s" msgid "Executing Host Maintenance Migration Strategy" msgstr "Executing Host Maintenance Migration Strategy" #, python-format msgid "Expected a logical name but received %(name)s" msgstr "Expected a logical name but received %(name)s" #, python-format msgid "Expected a logical name or uuid but received %(name)s" msgstr "Expected a logical name or UUID but received %(name)s" #, python-format msgid "Expected a uuid but received %(uuid)s" msgstr "Expected a UUID but received %(uuid)s" #, python-format msgid "Expected a uuid or int but received %(identity)s" msgstr "Expected a UUID or int but received %(identity)s" #, python-format msgid "Expected an interval or cron syntax but received %(name)s" msgstr "Expected an interval or cron syntax but received %(name)s" #, python-format msgid "Failed to create volume '%(volume)s. " msgstr "Failed to create volume '%(volume)s. " #, python-format msgid "Failed to delete volume '%(volume)s. " msgstr "Failed to delete volume '%(volume)s. " #, python-format msgid "Filter operator is not valid: %(operator)s not in %(valid_operators)s" msgstr "Filter operator is not valid: %(operator)s not in %(valid_operators)s" msgid "Filtering actions on both audit and action-plan is prohibited" msgstr "Filtering actions on both audit and action-plan is prohibited" msgid "Goal" msgstr "Goal" #, python-format msgid "Goal %(goal)s could not be found" msgstr "Goal %(goal)s could not be found" #, python-format msgid "Goal %(goal)s is invalid" msgstr "Goal %(goal)s is invalid" msgid "Goals" msgstr "Goals" msgid "Grafana helper does not support statistic series method" msgstr "Grafana helper does not support statistic series method" msgid "Hardware Maintenance" msgstr "Hardware Maintenance" #, python-format msgid "Here below is a table containing the objects that can be purged%s:" msgstr "Here below is a table containing the objects that can be purged%s:" msgid "Host Maintenance Strategy" msgstr "Host Maintenance Strategy" msgid "Illegal argument" msgstr "Illegal argument" #, python-format msgid "" "Incorrect mapping: could not find associated weight for %s in weight dict." msgstr "" "Incorrect mapping: could not find associated weight for %s in weight dict." #, python-format msgid "Interval of audit must be specified for %(audit_type)s." msgstr "Interval of audit must be specified for %(audit_type)s." #, python-format msgid "Interval of audit must not be set for %(audit_type)s." msgstr "Interval of audit must not be set for %(audit_type)s." 
#, python-format msgid "Invalid filter: %s" msgstr "Invalid filter: %s" msgid "Invalid number of features, expected 9" msgstr "Invalid number of features, expected 9" #, python-format msgid "Invalid query: %(start_time)s > %(end_time)s" msgstr "Invalid query: %(start_time)s > %(end_time)s" #, python-format msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'" msgstr "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'" #, python-format msgid "Invalid sort key: %s" msgstr "Invalid sort key: %s" msgid "Invalid state for swapping volume" msgstr "Invalid state for swapping volume" #, python-format msgid "Invalid state: %(state)s" msgstr "Invalid state: %(state)s" msgid "JSON list expected in feature argument" msgstr "JSON list expected in feature argument" msgid "Limit must be positive" msgstr "Limit must be positive" msgid "Limit should be positive" msgstr "Limit should be positive" msgid "Maximum time since last check-in for up service." msgstr "Maximum time since last check-in for up service." #, python-format msgid "Metric: %(metric)s not available" msgstr "Metric: %(metric)s not available" #, python-format msgid "Migration of type '%(migration_type)s' is not supported." msgstr "Migration of type '%(migration_type)s' is not supported." msgid "Minimum Nova API Version" msgstr "Minimum Nova API Version" msgid "" "Name of this node. This can be an opaque identifier. It is not necessarily a " "hostname, FQDN, or IP address. However, the node name must be valid within " "an AMQP key." msgstr "" "Name of this node. This can be an opaque identifier. It is not necessarily a " "hostname, FQDN, or IP address. However, the node name must be valid within " "an AMQP key." #, python-format msgid "No %(metric)s metric for %(host)s found." msgstr "No %(metric)s metric for %(host)s found." msgid "No datasources available" msgstr "No datasources available" #, python-format msgid "No strategy could be found to achieve the '%(goal)s' goal." msgstr "No strategy could be found to achieve the '%(goal)s' goal." msgid "Node Resource Consolidation strategy" msgstr "Node Resource Consolidation strategy" msgid "Noisy Neighbor" msgstr "Noisy Neighbour" msgid "Not authorized" msgstr "Not authorised" msgid "Not supported" msgstr "Not supported" msgid "Operation not permitted" msgstr "Operation not permitted" msgid "Outlet temperature based strategy" msgstr "Outlet temperature based strategy" #, python-format msgid "" "Payload not populated when trying to send notification \"%(class_name)s\"" msgstr "" "Payload not populated when trying to send notification \"%(class_name)s\"" msgid "Plugins" msgstr "Plugins" msgid "Policy File JSON to YAML Migration" msgstr "Policy File JSON to YAML Migration" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Policy doesn't allow %(action)s to be performed." #, python-format msgid "Project name seems ambiguous: %s" msgstr "Project name seems ambiguous: %s" #, python-format msgid "Project not Found: %s" msgstr "Project not Found: %s" #, python-format msgid "Provided %(action_type)s is not supported yet" msgstr "Provided %(action_type)s is not supported yet" #, python-format msgid "Provided cron is invalid: %(message)s" msgstr "Provided cron is invalid: %(message)s" #, python-format msgid "Purge results summary%s:" msgstr "Purge results summary%s:" msgid "" "Ratio of actual attached volumes migrated to planned attached volumes " "migrate." 
msgstr "" "Ratio of actual attached volumes migrated to planned attached volumes " "migrate." msgid "" "Ratio of actual cold migrated instances to planned cold migrate instances." msgstr "" "Ratio of actual cold migrated instances to planned cold migrate instances." msgid "" "Ratio of actual detached volumes migrated to planned detached volumes " "migrate." msgstr "" "Ratio of actual detached volumes migrated to planned detached volumes " "migrate." msgid "" "Ratio of actual live migrated instances to planned live migrate instances." msgstr "" "Ratio of actual live migrated instances to planned live migrate instances." msgid "Ratio of migrated virtual machines to audited virtual machines" msgstr "Ratio of migrated virtual machines to audited virtual machines" msgid "" "Ratio of released compute nodes divided by the total number of enabled " "compute nodes." msgstr "" "Ratio of released compute nodes divided by the total number of enabled " "compute nodes." msgid "Request not acceptable." msgstr "Request not acceptable." #, python-format msgid "Role name seems ambiguous: %s" msgstr "Role name seems ambiguous: %s" #, python-format msgid "Role not Found: %s" msgstr "Role not Found: %s" msgid "Saving Energy" msgstr "Saving Energy" msgid "Saving Energy Strategy" msgstr "Saving Energy Strategy" #, python-format msgid "Scoring Engine with name=%s not found" msgstr "Scoring Engine with name=%s not found" #, python-format msgid "ScoringEngine %(scoring_engine)s could not be found" msgstr "ScoringEngine %(scoring_engine)s could not be found" msgid "Seconds between running periodic tasks." msgstr "Seconds between running periodic tasks." msgid "Server Consolidation" msgstr "Server Consolidation" msgid "" "Specifies the minimum level for which to send notifications. If not set, no " "notifications will be sent. The default is for this option to be at the " "`INFO` level." msgstr "" "Specifies the minimum level for which to send notifications. If not set, no " "notifications will be sent. The default is for this option to be at the " "`INFO` level." msgid "" "Specify parameters but no predefined strategy for audit, or no parameter " "spec in predefined strategy" msgstr "" "Specify parameters but no predefined strategy for audit, or no parameter " "spec in predefined strategy" #, python-format msgid "Start or End time of audit must not be set for %(audit_type)s." msgstr "Start or End time of audit must not be set for %(audit_type)s." 
#, python-format msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)" msgstr "State transition not allowed: (%(initial_state)s -> %(new_state)s)" msgid "Storage Capacity Balance Strategy" msgstr "Storage Capacity Balance Strategy" msgid "Strategies" msgstr "Strategies" #, python-format msgid "Strategy %(strategy)s could not be found" msgstr "Strategy %(strategy)s could not be found" #, python-format msgid "Strategy %(strategy)s is invalid" msgstr "Strategy %(strategy)s is invalid" #, python-format msgid "The %(data_model_type)s data model could not be found" msgstr "The %(data_model_type)s data model could not be found" #, python-format msgid "The %(name)s %(id)s could not be found" msgstr "The %(name)s %(id)s could not be found" #, python-format msgid "The %(name)s pool %(attribute)s is not integer" msgstr "The %(name)s pool %(attribute)s is not integer" #, python-format msgid "The %(name)s resource %(id)s could not be found" msgstr "The %(name)s resource %(id)s could not be found" #, python-format msgid "The %(name)s resource %(id)s is not soft deleted" msgstr "The %(name)s resource %(id)s is not soft deleted" #, python-format msgid "The action %(action_id)s execution failed." msgstr "The action %(action_id)s execution failed." #, python-format msgid "The action description %(action_id)s cannot be found." msgstr "The action description %(action_id)s cannot be found." msgid "The audit template UUID or name specified is invalid" msgstr "The audit template UUID or name specified is invalid" #, python-format msgid "The baremetal resource '%(name)s' could not be found" msgstr "The baremetal resource '%(name)s' could not be found" #, python-format msgid "The cluster data model '%(cdm)s' could not be built" msgstr "The cluster data model '%(cdm)s' could not be built" msgid "The cluster state is not defined" msgstr "The cluster state is not defined" msgid "The cluster state is stale" msgstr "The cluster state is stale" #, python-format msgid "The compute node %(name)s could not be found" msgstr "The compute node %(name)s could not be found" #, python-format msgid "The compute resource '%(name)s' could not be found" msgstr "The compute resource '%(name)s' could not be found" #, python-format msgid "The identifier '%(name)s' is a reserved word" msgstr "The identifier '%(name)s' is a reserved word" #, python-format msgid "" "The indicator '%(name)s' with value '%(value)s' and spec type " "'%(spec_type)s' is invalid." msgstr "" "The indicator '%(name)s' with value '%(value)s' and spec type " "'%(spec_type)s' is invalid." #, python-format msgid "The instance '%(name)s' could not be found" msgstr "The instance '%(name)s' could not be found" #, python-format msgid "The ironic node %(uuid)s could not be found" msgstr "The Ironic node %(uuid)s could not be found" #, python-format msgid "The mapped compute node for instance '%(uuid)s' could not be found." msgstr "The mapped compute node for instance '%(uuid)s' could not be found." msgid "The node status is not defined" msgstr "The node status is not defined" msgid "The number of VM migrations to be performed." msgstr "The number of VM migrations to be performed." msgid "The number of attached volumes actually migrated." msgstr "The number of attached volumes actually migrated." msgid "The number of attached volumes planned to migrate." msgstr "The number of attached volumes planned to migrate." msgid "The number of compute nodes to be released." msgstr "The number of compute nodes to be released." 
msgid "The number of detached volumes actually migrated." msgstr "The number of detached volumes actually migrated." msgid "The number of detached volumes planned to migrate." msgstr "The number of detached volumes planned to migrate." msgid "The number of instances actually cold migrated." msgstr "The number of instances actually cold migrated." msgid "The number of instances actually live migrated." msgstr "The number of instances actually live migrated." msgid "The number of instances planned to cold migrate." msgstr "The number of instances planned to cold migrate." msgid "The number of instances planned to live migrate." msgstr "The number of instances planned to live migrate." #, python-format msgid "" "The number of objects (%(num)s) to delete from the database exceeds the " "maximum number of objects (%(max_number)s) specified." msgstr "" "The number of objects (%(num)s) to delete from the database exceeds the " "maximum number of objects (%(max_number)s) specified." #, python-format msgid "The pool %(name)s could not be found" msgstr "The pool %(name)s could not be found" #, python-format msgid "The service %(service)s cannot be found." msgstr "The service %(service)s cannot be found." #, python-format msgid "The storage node %(name)s could not be found" msgstr "The storage node %(name)s could not be found" #, python-format msgid "The storage resource '%(name)s' could not be found" msgstr "The storage resource '%(name)s' could not be found" msgid "The target state is not defined" msgstr "The target state is not defined" msgid "The total number of audited instances in strategy." msgstr "The total number of audited instances in strategy." msgid "The total number of enabled compute nodes." msgstr "The total number of enabled compute nodes." #, python-format msgid "The value %(value)s for parameter %(parameter)s is invalid" msgstr "The value %(value)s for parameter %(parameter)s is invalid" msgid "The value of original standard deviation." msgstr "The value of original standard deviation." msgid "The value of resulted standard deviation." msgstr "The value of resulted standard deviation." #, python-format msgid "The volume '%(name)s' could not be found" msgstr "The volume '%(name)s' could not be found" #, python-format msgid "There are %(count)d objects set for deletion. Continue? [y/N]" msgstr "There are %(count)d objects set for deletion. Continue? 
[y/N]" msgid "Thermal Optimization" msgstr "Thermal Optimisation" msgid "Total" msgstr "Total" msgid "Unable to parse features: " msgstr "Unable to parse features: " #, python-format msgid "Unable to parse features: %s" msgstr "Unable to parse features: %s" msgid "Unacceptable parameters" msgstr "Unacceptable parameters" msgid "Unclassified" msgstr "Unclassified" #, python-format msgid "Unexpected keystone client error occurred: %s" msgstr "Unexpected Keystone client error occurred: %s" msgid "Uniform airflow migration strategy" msgstr "Uniform airflow migration strategy" #, python-format msgid "User name seems ambiguous: %s" msgstr "User name seems ambiguous: %s" #, python-format msgid "User not Found: %s" msgstr "User not Found: %s" msgid "VM Workload Consolidation Strategy" msgstr "VM Workload Consolidation Strategy" msgid "Volume type must be different for retyping" msgstr "Volume type must be different for retyping" msgid "Volume type must be same for migrating" msgstr "Volume type must be same for migrating" msgid "" "Watcher database schema is already under version control; use upgrade() " "instead" msgstr "" "Watcher database schema is already under version control; use upgrade() " "instead" #, python-format msgid "Workflow execution error: %(error)s" msgstr "Workflow execution error: %(error)s" msgid "Workload Balance Migration Strategy" msgstr "Workload Balance Migration Strategy" msgid "Workload Balancing" msgstr "Workload Balancing" msgid "Workload stabilization" msgstr "Workload stabilisation" #, python-format msgid "Wrong type. Expected '%(type)s', got '%(value)s'" msgstr "Wrong type. Expected '%(type)s', got '%(value)s'" #, python-format msgid "" "You shouldn't use any other IDs of %(resource)s if you use wildcard " "character." msgstr "" "You shouldn't use any other IDs of %(resource)s if you use wildcard " "character." msgid "Zone migration" msgstr "Zone migration" msgid "destination type is required when migration type is swap" msgstr "destination type is required when migration type is swap" msgid "host_aggregates can't be included and excluded together" msgstr "host_aggregates can't be included and excluded together" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/notifications/0000775000175000017500000000000000000000000021163 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/__init__.py0000664000175000017500000000237600000000000023304 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Note(gibi): Importing publicly called functions so the caller code does not # need to be changed after we moved these function inside the package # Todo(gibi): remove these imports after legacy notifications using these are # transformed to versioned notifications from watcher.notifications import action # noqa from watcher.notifications import action_plan # noqa from watcher.notifications import audit # noqa from watcher.notifications import exception # noqa from watcher.notifications import goal # noqa from watcher.notifications import service # noqa from watcher.notifications import strategy # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/action.py0000664000175000017500000002666300000000000023027 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.common import context as wcontext from watcher.common import exception from watcher.notifications import action_plan as ap_notifications from watcher.notifications import base as notificationbase from watcher.notifications import exception as exception_notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class ActionPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('action', 'uuid'), 'action_type': ('action', 'action_type'), 'input_parameters': ('action', 'input_parameters'), 'state': ('action', 'state'), 'parents': ('action', 'parents'), 'created_at': ('action', 'created_at'), 'updated_at': ('action', 'updated_at'), 'deleted_at': ('action', 'deleted_at'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': wfields.UUIDField(), 'action_type': wfields.StringField(nullable=False), 'input_parameters': wfields.DictField(nullable=False, default={}), 'state': wfields.StringField(nullable=False), 'parents': wfields.ListOfUUIDsField(nullable=False, default=[]), 'action_plan_uuid': wfields.UUIDField(), 'action_plan': wfields.ObjectField('TerseActionPlanPayload'), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, action, **kwargs): super(ActionPayload, self).__init__(**kwargs) self.populate_schema(action=action) @base.WatcherObjectRegistry.register_notification class ActionStateUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class ActionCreatePayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = {} def __init__(self, action, action_plan): super(ActionCreatePayload, 
self).__init__( action=action, action_plan=action_plan) @base.WatcherObjectRegistry.register_notification class ActionUpdatePayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'state_update': wfields.ObjectField('ActionStateUpdatePayload'), } def __init__(self, action, state_update, action_plan): super(ActionUpdatePayload, self).__init__( action=action, state_update=state_update, action_plan=action_plan) @base.WatcherObjectRegistry.register_notification class ActionExecutionPayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action, action_plan, **kwargs): super(ActionExecutionPayload, self).__init__( action=action, action_plan=action_plan, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionCancelPayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action, action_plan, **kwargs): super(ActionCancelPayload, self).__init__( action=action, action_plan=action_plan, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionDeletePayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = {} def __init__(self, action, action_plan): super(ActionDeletePayload, self).__init__( action=action, action_plan=action_plan) @notificationbase.notification_sample('action-execution-error.json') @notificationbase.notification_sample('action-execution-end.json') @notificationbase.notification_sample('action-execution-start.json') @base.WatcherObjectRegistry.register_notification class ActionExecutionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionExecutionPayload') } @notificationbase.notification_sample('action-create.json') @base.WatcherObjectRegistry.register_notification class ActionCreateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionCreatePayload') } @notificationbase.notification_sample('action-update.json') @base.WatcherObjectRegistry.register_notification class ActionUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionUpdatePayload') } @notificationbase.notification_sample('action-delete.json') @base.WatcherObjectRegistry.register_notification class ActionDeleteNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionDeletePayload') } @notificationbase.notification_sample('action-cancel-error.json') @notificationbase.notification_sample('action-cancel-end.json') @notificationbase.notification_sample('action-cancel-start.json') @base.WatcherObjectRegistry.register_notification class ActionCancelNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionCancelPayload') } def _get_action_plan_payload(action): action_plan = None strategy_uuid = None audit = None try: action_plan = action.action_plan audit = objects.Audit.get(wcontext.make_context(show_deleted=True), action_plan.audit_id) if audit.strategy_id: strategy_uuid = objects.Strategy.get( wcontext.make_context(show_deleted=True), audit.strategy_id).uuid except 
NotImplementedError: raise exception.EagerlyLoadedActionRequired(action=action.uuid) action_plan_payload = ap_notifications.TerseActionPlanPayload( action_plan=action_plan, audit_uuid=audit.uuid, strategy_uuid=strategy_uuid) return action_plan_payload def send_create(context, action, service='infra-optim', host=None): """Emit an action.create notification.""" action_plan_payload = _get_action_plan_payload(action) versioned_payload = ActionCreatePayload( action=action, action_plan=action_plan_payload, ) notification = ActionCreateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action', action=wfields.NotificationAction.CREATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_update(context, action, service='infra-optim', host=None, old_state=None): """Emit an action.update notification.""" action_plan_payload = _get_action_plan_payload(action) state_update = ActionStateUpdatePayload( old_state=old_state, state=action.state if old_state else None) versioned_payload = ActionUpdatePayload( action=action, state_update=state_update, action_plan=action_plan_payload, ) notification = ActionUpdateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_delete(context, action, service='infra-optim', host=None): """Emit an action.delete notification.""" action_plan_payload = _get_action_plan_payload(action) versioned_payload = ActionDeletePayload( action=action, action_plan=action_plan_payload, ) notification = ActionDeleteNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action', action=wfields.NotificationAction.DELETE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_execution_notification(context, action, notification_action, phase, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action execution notification.""" action_plan_payload = _get_action_plan_payload(action) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionExecutionPayload( action=action, action_plan=action_plan_payload, fault=fault, ) notification = ActionExecutionNotification( priority=priority, event_type=notificationbase.EventType( object='action', action=notification_action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_cancel_notification(context, action, notification_action, phase, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action cancel notification.""" action_plan_payload = _get_action_plan_payload(action) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionCancelPayload( action=action, action_plan=action_plan_payload, fault=fault, ) notification = ActionCancelNotification( priority=priority, event_type=notificationbase.EventType( object='action', 
action=notification_action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/action_plan.py0000664000175000017500000003241500000000000024031 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.common import context as wcontext from watcher.common import exception from watcher.notifications import audit as audit_notifications from watcher.notifications import base as notificationbase from watcher.notifications import exception as exception_notifications from watcher.notifications import strategy as strategy_notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class TerseActionPlanPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('action_plan', 'uuid'), 'state': ('action_plan', 'state'), 'global_efficacy': ('action_plan', 'global_efficacy'), 'created_at': ('action_plan', 'created_at'), 'updated_at': ('action_plan', 'updated_at'), 'deleted_at': ('action_plan', 'deleted_at'), } # Version 1.0: Initial version # Version 1.1: Changed 'global_efficacy' type from Dictionary to List VERSION = '1.1' fields = { 'uuid': wfields.UUIDField(), 'state': wfields.StringField(), 'global_efficacy': wfields.FlexibleListOfDictField(nullable=True), 'audit_uuid': wfields.UUIDField(), 'strategy_uuid': wfields.UUIDField(nullable=True), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, action_plan, audit=None, strategy=None, **kwargs): super(TerseActionPlanPayload, self).__init__(audit=audit, strategy=strategy, **kwargs) self.populate_schema(action_plan=action_plan) @base.WatcherObjectRegistry.register_notification class ActionPlanPayload(TerseActionPlanPayload): SCHEMA = { 'uuid': ('action_plan', 'uuid'), 'state': ('action_plan', 'state'), 'global_efficacy': ('action_plan', 'global_efficacy'), 'created_at': ('action_plan', 'created_at'), 'updated_at': ('action_plan', 'updated_at'), 'deleted_at': ('action_plan', 'deleted_at'), } # Version 1.0: Initial version # Version 1.1: Changed 'global_efficacy' type from Dictionary to List VERSION = '1.1' fields = { 'audit': wfields.ObjectField('TerseAuditPayload'), 'strategy': wfields.ObjectField('StrategyPayload'), } def __init__(self, action_plan, audit, strategy, **kwargs): if not kwargs.get('audit_uuid'): kwargs['audit_uuid'] = audit.uuid if strategy and not kwargs.get('strategy_uuid'): kwargs['strategy_uuid'] = strategy.uuid super(ActionPlanPayload, self).__init__( action_plan, audit=audit, strategy=strategy, **kwargs)
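# A usage sketch (illustrative only, not part of the original module): the
# action notifications in watcher/notifications/action.py build the terse
# payload from an eagerly loaded action plan, roughly as follows:
#
#     payload = TerseActionPlanPayload(
#         action_plan=action_plan,
#         audit_uuid=audit.uuid,
#         strategy_uuid=strategy_uuid)
#
# populate_schema() then copies the SCHEMA-mapped attributes (uuid, state,
# global_efficacy and the timestamps) from action_plan into the payload.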
@base.WatcherObjectRegistry.register_notification class ActionPlanStateUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class ActionPlanCreatePayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = {} def __init__(self, action_plan, audit, strategy): super(ActionPlanCreatePayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy) @base.WatcherObjectRegistry.register_notification class ActionPlanUpdatePayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = { 'state_update': wfields.ObjectField('ActionPlanStateUpdatePayload'), } def __init__(self, action_plan, state_update, audit, strategy): super(ActionPlanUpdatePayload, self).__init__( action_plan=action_plan, state_update=state_update, audit=audit, strategy=strategy) @base.WatcherObjectRegistry.register_notification class ActionPlanActionPayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action_plan, audit, strategy, **kwargs): super(ActionPlanActionPayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionPlanDeletePayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = {} def __init__(self, action_plan, audit, strategy): super(ActionPlanDeletePayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy) @base.WatcherObjectRegistry.register_notification class ActionPlanCancelPayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action_plan, audit, strategy, **kwargs): super(ActionPlanCancelPayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy, **kwargs) @notificationbase.notification_sample('action_plan-execution-error.json') @notificationbase.notification_sample('action_plan-execution-end.json') @notificationbase.notification_sample('action_plan-execution-start.json') @base.WatcherObjectRegistry.register_notification class ActionPlanActionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanActionPayload') } @notificationbase.notification_sample('action_plan-create.json') @base.WatcherObjectRegistry.register_notification class ActionPlanCreateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanCreatePayload') } @notificationbase.notification_sample('action_plan-update.json') @base.WatcherObjectRegistry.register_notification class ActionPlanUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanUpdatePayload') } @notificationbase.notification_sample('action_plan-delete.json') @base.WatcherObjectRegistry.register_notification class 
ActionPlanDeleteNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanDeletePayload') } @notificationbase.notification_sample('action_plan-cancel-error.json') @notificationbase.notification_sample('action_plan-cancel-end.json') @notificationbase.notification_sample('action_plan-cancel-start.json') @base.WatcherObjectRegistry.register_notification class ActionPlanCancelNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanCancelPayload') } def _get_common_payload(action_plan): audit = None strategy = None try: audit = action_plan.audit strategy = action_plan.strategy except NotImplementedError: raise exception.EagerlyLoadedActionPlanRequired( action_plan=action_plan.uuid) goal = objects.Goal.get( wcontext.make_context(show_deleted=True), audit.goal_id) audit_payload = audit_notifications.TerseAuditPayload( audit=audit, goal_uuid=goal.uuid) strategy_payload = strategy_notifications.StrategyPayload( strategy=strategy) return audit_payload, strategy_payload def send_create(context, action_plan, service='infra-optim', host=None): """Emit an action_plan.create notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) versioned_payload = ActionPlanCreatePayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, ) notification = ActionPlanCreateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action_plan', action=wfields.NotificationAction.CREATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_update(context, action_plan, service='infra-optim', host=None, old_state=None): """Emit an action_plan.update notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) state_update = ActionPlanStateUpdatePayload( old_state=old_state, state=action_plan.state if old_state else None) versioned_payload = ActionPlanUpdatePayload( action_plan=action_plan, state_update=state_update, audit=audit_payload, strategy=strategy_payload, ) notification = ActionPlanUpdateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action_plan', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_delete(context, action_plan, service='infra-optim', host=None): """Emit an action_plan.delete notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) versioned_payload = ActionPlanDeletePayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, ) notification = ActionPlanDeleteNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action_plan', action=wfields.NotificationAction.DELETE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_action_notification(context, action_plan, action, phase=None, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action_plan action notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) fault = None if phase == 
wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionPlanActionPayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, fault=fault, ) notification = ActionPlanActionNotification( priority=priority, event_type=notificationbase.EventType( object='action_plan', action=action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_cancel_notification(context, action_plan, action, phase=None, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action_plan cancel notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionPlanCancelPayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, fault=fault, ) notification = ActionPlanCancelNotification( priority=priority, event_type=notificationbase.EventType( object='action_plan', action=action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/audit.py0000664000175000017500000002744700000000000022661 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
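# Usage sketch (illustrative, not part of the original module): callers are
# expected to pass an eagerly loaded Audit object, e.g.
#
#     notifications.audit.send_create(context, audit)
#
# If audit.goal (and, when set, audit.strategy) cannot be accessed without a
# lazy load, _get_common_payload() below raises EagerlyLoadedAuditRequired.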
from oslo_config import cfg from watcher.common import exception from watcher.notifications import base as notificationbase from watcher.notifications import exception as exception_notifications from watcher.notifications import goal as goal_notifications from watcher.notifications import strategy as strategy_notifications from watcher.objects import base from watcher.objects import fields as wfields CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class TerseAuditPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('audit', 'uuid'), 'name': ('audit', 'name'), 'audit_type': ('audit', 'audit_type'), 'state': ('audit', 'state'), 'parameters': ('audit', 'parameters'), 'interval': ('audit', 'interval'), 'scope': ('audit', 'scope'), 'auto_trigger': ('audit', 'auto_trigger'), 'next_run_time': ('audit', 'next_run_time'), 'created_at': ('audit', 'created_at'), 'updated_at': ('audit', 'updated_at'), 'deleted_at': ('audit', 'deleted_at'), } # Version 1.0: Initial version # Version 1.1: Added 'auto_trigger' boolean field, # Added 'next_run_time' DateTime field, # 'interval' type has been changed from Integer to String # Version 1.2: Added 'name' string field VERSION = '1.2' fields = { 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'audit_type': wfields.StringField(), 'state': wfields.StringField(), 'parameters': wfields.FlexibleDictField(nullable=True), 'interval': wfields.StringField(nullable=True), 'scope': wfields.FlexibleListOfDictField(nullable=True), 'goal_uuid': wfields.UUIDField(), 'strategy_uuid': wfields.UUIDField(nullable=True), 'auto_trigger': wfields.BooleanField(), 'next_run_time': wfields.DateTimeField(nullable=True), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, audit, goal_uuid, strategy_uuid=None, **kwargs): super(TerseAuditPayload, self).__init__( goal_uuid=goal_uuid, strategy_uuid=strategy_uuid, **kwargs) self.populate_schema(audit=audit) @base.WatcherObjectRegistry.register_notification class AuditPayload(TerseAuditPayload): SCHEMA = { 'uuid': ('audit', 'uuid'), 'name': ('audit', 'name'), 'audit_type': ('audit', 'audit_type'), 'state': ('audit', 'state'), 'parameters': ('audit', 'parameters'), 'interval': ('audit', 'interval'), 'scope': ('audit', 'scope'), 'auto_trigger': ('audit', 'auto_trigger'), 'next_run_time': ('audit', 'next_run_time'), 'created_at': ('audit', 'created_at'), 'updated_at': ('audit', 'updated_at'), 'deleted_at': ('audit', 'deleted_at'), } # Version 1.0: Initial version # Version 1.1: Added 'auto_trigger' field, # Added 'next_run_time' field # Version 1.2: Added 'name' string field VERSION = '1.2' fields = { 'goal': wfields.ObjectField('GoalPayload'), 'strategy': wfields.ObjectField('StrategyPayload', nullable=True), } def __init__(self, audit, goal, strategy=None, **kwargs): if not kwargs.get('goal_uuid'): kwargs['goal_uuid'] = goal.uuid if strategy and not kwargs.get('strategy_uuid'): kwargs['strategy_uuid'] = strategy.uuid super(AuditPayload, self).__init__( audit=audit, goal=goal, strategy=strategy, **kwargs) @base.WatcherObjectRegistry.register_notification class AuditStateUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class AuditCreatePayload(AuditPayload): # Version 
1.0: Initial version # Version 1.1: Added 'auto_trigger' field, # Added 'next_run_time' field VERSION = '1.1' fields = {} def __init__(self, audit, goal, strategy): super(AuditCreatePayload, self).__init__( audit=audit, goal=goal, goal_uuid=goal.uuid, strategy=strategy) @base.WatcherObjectRegistry.register_notification class AuditUpdatePayload(AuditPayload): # Version 1.0: Initial version # Version 1.1: Added 'auto_trigger' field, # Added 'next_run_time' field VERSION = '1.1' fields = { 'state_update': wfields.ObjectField('AuditStateUpdatePayload'), } def __init__(self, audit, state_update, goal, strategy): super(AuditUpdatePayload, self).__init__( audit=audit, state_update=state_update, goal=goal, goal_uuid=goal.uuid, strategy=strategy) @base.WatcherObjectRegistry.register_notification class AuditActionPayload(AuditPayload): # Version 1.0: Initial version # Version 1.1: Added 'auto_trigger' field, # Added 'next_run_time' field VERSION = '1.1' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, audit, goal, strategy, **kwargs): super(AuditActionPayload, self).__init__( audit=audit, goal=goal, goal_uuid=goal.uuid, strategy=strategy, **kwargs) @base.WatcherObjectRegistry.register_notification class AuditDeletePayload(AuditPayload): # Version 1.0: Initial version # Version 1.1: Added 'auto_trigger' field, # Added 'next_run_time' field VERSION = '1.1' fields = {} def __init__(self, audit, goal, strategy): super(AuditDeletePayload, self).__init__( audit=audit, goal=goal, goal_uuid=goal.uuid, strategy=strategy) @notificationbase.notification_sample('audit-strategy-error.json') @notificationbase.notification_sample('audit-strategy-end.json') @notificationbase.notification_sample('audit-strategy-start.json') @base.WatcherObjectRegistry.register_notification class AuditActionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('AuditActionPayload') } @notificationbase.notification_sample('audit-create.json') @base.WatcherObjectRegistry.register_notification class AuditCreateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('AuditCreatePayload') } @notificationbase.notification_sample('audit-update.json') @base.WatcherObjectRegistry.register_notification class AuditUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('AuditUpdatePayload') } @notificationbase.notification_sample('audit-delete.json') @base.WatcherObjectRegistry.register_notification class AuditDeleteNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('AuditDeletePayload') } def _get_common_payload(audit): goal = None strategy = None try: goal = audit.goal if audit.strategy_id: strategy = audit.strategy except NotImplementedError: raise exception.EagerlyLoadedAuditRequired(audit=audit.uuid) goal_payload = goal_notifications.GoalPayload(goal=goal) strategy_payload = None if strategy: strategy_payload = strategy_notifications.StrategyPayload( strategy=strategy) return goal_payload, strategy_payload def send_create(context, audit, service='infra-optim', host=None): """Emit an audit.create notification.""" goal_payload, strategy_payload = _get_common_payload(audit) versioned_payload = AuditCreatePayload( audit=audit, goal=goal_payload, 
strategy=strategy_payload, ) notification = AuditCreateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='audit', action=wfields.NotificationAction.CREATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_update(context, audit, service='infra-optim', host=None, old_state=None): """Emit an audit.update notification.""" goal_payload, strategy_payload = _get_common_payload(audit) state_update = AuditStateUpdatePayload( old_state=old_state, state=audit.state if old_state else None) versioned_payload = AuditUpdatePayload( audit=audit, state_update=state_update, goal=goal_payload, strategy=strategy_payload, ) notification = AuditUpdateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='audit', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_delete(context, audit, service='infra-optim', host=None): goal_payload, strategy_payload = _get_common_payload(audit) versioned_payload = AuditDeletePayload( audit=audit, goal=goal_payload, strategy=strategy_payload, ) notification = AuditDeleteNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='audit', action=wfields.NotificationAction.DELETE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_action_notification(context, audit, action, phase=None, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an audit action notification.""" goal_payload, strategy_payload = _get_common_payload(audit) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = AuditActionPayload( audit=audit, goal=goal_payload, strategy=strategy_payload, fault=fault, ) notification = AuditActionNotification( priority=priority, event_type=notificationbase.EventType( object='audit', action=action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/base.py0000664000175000017500000001763600000000000022464 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
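# Overview sketch (illustrative): every module in watcher/notifications emits
# through the machinery defined below, following the same pattern:
#
#     payload = SomePayload(...)  # a NotificationPayloadBase subclass
#     notification = SomeNotification(  # a NotificationBase subclass
#         priority=wfields.NotificationPriority.INFO,
#         event_type=EventType(object='audit',
#                              action=wfields.NotificationAction.CREATE),
#         publisher=NotificationPublisher(host=CONF.host,
#                                         binary='infra-optim'),
#         payload=payload)
#     notification.emit(context)
#
# emit() filters on CONF.notification_level via NOTIFY_LEVELS before handing
# the serialized payload to oslo.messaging through watcher.common.rpc.
# `SomePayload`/`SomeNotification` are placeholders, not real classes.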
from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.common import rpc from watcher.objects import base from watcher.objects import fields as wfields CONF = cfg.CONF LOG = log.getLogger(__name__) # Definition of notification levels in increasing order of severity NOTIFY_LEVELS = { wfields.NotificationPriority.DEBUG: 0, wfields.NotificationPriority.INFO: 1, wfields.NotificationPriority.WARNING: 2, wfields.NotificationPriority.ERROR: 3, wfields.NotificationPriority.CRITICAL: 4 } @base.WatcherObjectRegistry.register_if(False) class NotificationObject(base.WatcherObject): """Base class for every notification related versioned object.""" # Version 1.0: Initial version VERSION = '1.0' def __init__(self, **kwargs): super(NotificationObject, self).__init__(**kwargs) # The notification objects are created on the fly when watcher emits # the notification. This causes every object to show every field as # changed. We don't want to send this meaningless information so we # reset the object after creation. self.obj_reset_changes(recursive=False) def save(self, context): raise exception.UnsupportedError() def obj_load_attr(self, attrname): raise exception.UnsupportedError() @base.WatcherObjectRegistry.register_notification class EventType(NotificationObject): # Version 1.0: Initial version # Version 1.1: Added STRATEGY action in NotificationAction enum # Version 1.2: Added PLANNER action in NotificationAction enum # Version 1.3: Added EXECUTION action in NotificationAction enum VERSION = '1.3' fields = { 'object': wfields.StringField(), 'action': wfields.NotificationActionField(), 'phase': wfields.NotificationPhaseField(nullable=True), } def to_notification_event_type_field(self): """Serialize the object to the wire format.""" s = '%s.%s' % (self.object, self.action) if self.obj_attr_is_set('phase'): s += '.%s' % self.phase return s @base.WatcherObjectRegistry.register_if(False) class NotificationPayloadBase(NotificationObject): """Base class for the payload of versioned notifications.""" # SCHEMA defines how to populate the payload fields. It is a dictionary # where every key value pair has the following format: # <payload_field_name>: (<data_source_name>, # <field_of_the_data_source>) # The <payload_field_name> is the name where the data will be stored in the # payload object, this field has to be defined as a field of the payload. # The <data_source_name> shall refer to the name of the parameter passed as # kwarg to the payload's populate_schema() call and this object will be # used as the source of the data. The <field_of_the_data_source> shall be # a valid field of the passed argument. # The SCHEMA needs to be applied with the populate_schema() call before the # notification can be emitted. # The value of the payload.<payload_field_name> field will be set by the # <data_source_name>.<field_of_the_data_source> field. The # <data_source_name> will not be part of the payload object internal or # external representation. # Payload fields that are not set by the SCHEMA can be filled in the same # way as in any versioned object.
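# For example (an illustrative sketch, mirroring GoalPayload elsewhere in
# this package):
#
#     SCHEMA = {'uuid': ('goal', 'uuid')}
#     fields = {'uuid': wfields.UUIDField()}
#
# then payload.populate_schema(goal=some_goal) copies some_goal.uuid into
# payload.uuid; `some_goal` here is just a stand-in for any Goal object.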
SCHEMA = {} # Version 1.0: Initial version VERSION = '1.0' def __init__(self, **kwargs): super(NotificationPayloadBase, self).__init__(**kwargs) self.populated = not self.SCHEMA def populate_schema(self, **kwargs): """Populate the object based on the SCHEMA and the source objects :param kwargs: A dict contains the source object at the key defined in the SCHEMA """ for key, (obj, field) in self.SCHEMA.items(): source = kwargs[obj] if source.obj_attr_is_set(field): setattr(self, key, getattr(source, field)) self.populated = True # the schema population will create changed fields but we don't need # this information in the notification self.obj_reset_changes(recursive=False) @base.WatcherObjectRegistry.register_notification class NotificationPublisher(NotificationObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'host': wfields.StringField(nullable=False), 'binary': wfields.StringField(nullable=False), } @base.WatcherObjectRegistry.register_if(False) class NotificationBase(NotificationObject): """Base class for versioned notifications. Every subclass shall define a 'payload' field. """ # Version 1.0: Initial version VERSION = '1.0' fields = { 'priority': wfields.NotificationPriorityField(), 'event_type': wfields.ObjectField('EventType'), 'publisher': wfields.ObjectField('NotificationPublisher'), } def save(self, context): raise exception.UnsupportedError() def obj_load_attr(self, attrname): raise exception.UnsupportedError() def _should_notify(self): """Determine whether the notification should be sent. A notification is sent when the level of the notification is greater than or equal to the level specified in the configuration, in the increasing order of DEBUG, INFO, WARNING, ERROR, CRITICAL. :return: True if notification should be sent, False otherwise. """ if not CONF.notification_level: return False return (NOTIFY_LEVELS[self.priority] >= NOTIFY_LEVELS[CONF.notification_level]) def _emit(self, context, event_type, publisher_id, payload): notifier = rpc.get_notifier(publisher_id) notify = getattr(notifier, self.priority) LOG.debug("Emitting notification `%s`", event_type) notify(context, event_type=event_type, payload=payload) def emit(self, context): """Send the notification.""" if not self._should_notify(): return if not self.payload.populated: raise exception.NotificationPayloadError( class_name=self.__class__.__name__) # Note(gibi): notification payload will be a newly populated object # therefore every field of it will look changed so this does not carry # any extra information so we drop this from the payload. self.payload.obj_reset_changes(recursive=False) self._emit( context, event_type=self.event_type.to_notification_event_type_field(), publisher_id='%s:%s' % (self.publisher.binary, self.publisher.host), payload=self.payload.obj_to_primitive()) def notification_sample(sample): """Provide a notification sample of the decorated notification. Class decorator to attach the notification sample information to the notification object for documentation generation purposes. :param sample: the path of the sample json file relative to the doc/notification_samples/ directory in the watcher repository root. 
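Illustrative usage (this mirrors how the decorator is applied elsewhere in
this package, e.g. in watcher/notifications/audit.py):

    @notification_sample('audit-create.json')
    class AuditCreateNotification(NotificationBase):
        ...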
""" def wrap(cls): if not getattr(cls, 'samples', None): cls.samples = [sample] else: cls.samples.append(sample) return cls return wrap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/exception.py0000664000175000017500000000370600000000000023541 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import sys from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register_notification class ExceptionPayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'module_name': wfields.StringField(), 'function_name': wfields.StringField(), 'exception': wfields.StringField(), 'exception_message': wfields.StringField() } @classmethod def from_exception(cls, fault=None): fault = fault or sys.exc_info()[1] trace = inspect.trace()[-1] # TODO(gibi): apply strutils.mask_password on exception_message and # consider emitting the exception_message only if the safe flag is # true in the exception like in the REST API return cls( function_name=trace[3], module_name=inspect.getmodule(trace[0]).__name__, exception=fault.__class__.__name__, exception_message=str(fault)) @notificationbase.notification_sample('infra-optim-exception.json') @base.WatcherObjectRegistry.register_notification class ExceptionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ExceptionPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/goal.py0000664000175000017500000000346300000000000022465 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register_notification class GoalPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('goal', 'uuid'), 'name': ('goal', 'name'), 'display_name': ('goal', 'display_name'), 'efficacy_specification': ('goal', 'efficacy_specification'), 'created_at': ('goal', 'created_at'), 'updated_at': ('goal', 'updated_at'), 'deleted_at': ('goal', 'deleted_at'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'efficacy_specification': wfields.FlexibleListOfDictField(), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, goal, **kwargs): super(GoalPayload, self).__init__(**kwargs) self.populate_schema(goal=goal) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/service.py0000664000175000017500000000745500000000000023210 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
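# Usage sketch (illustrative): a caller monitoring service liveness would
# emit a state change with
#
#     send_service_update(context, failed_service,
#                         state=o_service.ServiceStatus.FAILED)
#
# which publishes a WARNING-level service.update notification; passing
# ServiceStatus.ACTIVE instead reports the recovery at INFO priority.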
from oslo_config import cfg from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields from watcher.objects import service as o_service CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class ServicePayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'service_host': ('failed_service', 'host'), 'name': ('failed_service', 'name'), 'last_seen_up': ('failed_service', 'last_seen_up'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'service_host': wfields.StringField(), 'name': wfields.StringField(), 'last_seen_up': wfields.DateTimeField(nullable=True), } def __init__(self, failed_service, status_update, **kwargs): super(ServicePayload, self).__init__( failed_service=failed_service, status_update=status_update, **kwargs) self.populate_schema(failed_service=failed_service) @base.WatcherObjectRegistry.register_notification class ServiceStatusUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class ServiceUpdatePayload(ServicePayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'status_update': wfields.ObjectField('ServiceStatusUpdatePayload'), } def __init__(self, failed_service, status_update): super(ServiceUpdatePayload, self).__init__( failed_service=failed_service, status_update=status_update) @notificationbase.notification_sample('service-update.json') @base.WatcherObjectRegistry.register_notification class ServiceUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ServiceUpdatePayload') } def send_service_update(context, failed_service, state, service='infra-optim', host=None): """Emit a service.update notification.""" if state == o_service.ServiceStatus.FAILED: priority = wfields.NotificationPriority.WARNING status_update = ServiceStatusUpdatePayload( old_state=o_service.ServiceStatus.ACTIVE, state=o_service.ServiceStatus.FAILED) else: priority = wfields.NotificationPriority.INFO status_update = ServiceStatusUpdatePayload( old_state=o_service.ServiceStatus.FAILED, state=o_service.ServiceStatus.ACTIVE) versioned_payload = ServiceUpdatePayload( failed_service=failed_service, status_update=status_update ) notification = ServiceUpdateNotification( priority=priority, event_type=notificationbase.EventType( object='service', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/notifications/strategy.py0000664000175000017500000000352500000000000023404 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied.
# See the License for the specific language governing permissions and # limitations under the License. from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register_notification class StrategyPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('strategy', 'uuid'), 'name': ('strategy', 'name'), 'display_name': ('strategy', 'display_name'), 'parameters_spec': ('strategy', 'parameters_spec'), 'created_at': ('strategy', 'created_at'), 'updated_at': ('strategy', 'updated_at'), 'deleted_at': ('strategy', 'deleted_at'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'parameters_spec': wfields.FlexibleDictField(nullable=True), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, strategy, **kwargs): super(StrategyPayload, self).__init__(**kwargs) self.populate_schema(strategy=strategy) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/objects/0000775000175000017500000000000000000000000017743 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/__init__.py0000664000175000017500000000314000000000000022052 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(comstud): You may scratch your head as you see code that imports # this module and then accesses attributes for objects such as Node, # etc, yet you do not see these attributes in here. Never fear, there is # a little bit of magic. When objects are registered, an attribute is set # on this module automatically, pointing to the newest/latest version of # the object. def register_all(): # NOTE(danms): You must make sure your object gets imported in this # function in order for it to be registered by services that may # need to receive it via RPC. __import__('watcher.objects.goal') __import__('watcher.objects.strategy') __import__('watcher.objects.audit_template') __import__('watcher.objects.audit') __import__('watcher.objects.action_plan') __import__('watcher.objects.action') __import__('watcher.objects.efficacy_indicator') __import__('watcher.objects.scoring_engine') __import__('watcher.objects.service') __import__('watcher.objects.action_description') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/action.py0000664000175000017500000001544100000000000021577 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields class State(object): PENDING = 'PENDING' ONGOING = 'ONGOING' FAILED = 'FAILED' SUCCEEDED = 'SUCCEEDED' DELETED = 'DELETED' CANCELLED = 'CANCELLED' CANCELLING = 'CANCELLING' @base.WatcherObjectRegistry.register class Action(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'action_plan' object field # Version 2.0: Removed 'next' object field, Added 'parents' object field VERSION = '2.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'action_plan_id': wfields.IntegerField(), 'action_type': wfields.StringField(nullable=True), 'input_parameters': wfields.DictField(nullable=True), 'state': wfields.StringField(nullable=True), 'parents': wfields.ListOfStringsField(nullable=True), 'action_plan': wfields.ObjectField('ActionPlan', nullable=True), } object_fields = { 'action_plan': (objects.ActionPlan, 'action_plan_id'), } @base.remotable_classmethod def get(cls, context, action_id, eager=False): """Find an action based on its id or uuid and return an Action object. :param action_id: the id *or* uuid of an action. :param eager: Load object fields if True (Default: False) :returns: an :class:`Action` object. """ if utils.is_int_like(action_id): return cls.get_by_id(context, action_id, eager=eager) elif utils.is_uuid_like(action_id): return cls.get_by_uuid(context, action_id, eager=eager) else: raise exception.InvalidIdentity(identity=action_id) @base.remotable_classmethod def get_by_id(cls, context, action_id, eager=False): """Find an action based on its integer id and return an Action object. :param action_id: the id of an action. :param eager: Load object fields if True (Default: False) :returns: an :class:`Action` object. """ db_action = cls.dbapi.get_action_by_id(context, action_id, eager=eager) action = cls._from_db_object(cls(context), db_action, eager=eager) return action @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find an action based on uuid and return an :class:`Action` object. :param uuid: the uuid of an action. :param context: Security context :param eager: Load object fields if True (Default: False) :returns: an :class:`Action` object. """ db_action = cls.dbapi.get_action_by_uuid(context, uuid, eager=eager) action = cls._from_db_object(cls(context), db_action, eager=eager) return action @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of Action objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply.
Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`Action` objects. """ db_actions = cls.dbapi.get_action_list(context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_actions] @base.remotable def create(self): """Create an :class:`Action` record in the DB. :returns: An :class:`Action` object. """ values = self.obj_get_changes() db_action = self.dbapi.create_action(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_action, eager=True) notifications.action.send_create(self.obj_context, self) def destroy(self): """Delete the Action from the DB""" self.dbapi.destroy_action(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this Action. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_action(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) notifications.action.send_update(self.obj_context, self) self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this Action. Loads an action with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded action column by column, if there are any updates. :param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the Action from the DB""" self.state = State.DELETED self.save() db_obj = self.dbapi.soft_delete_action(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) notifications.action.send_delete(self.obj_context, self) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/action_description.py0000664000175000017500000001264000000000000024200 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
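# Usage sketch (illustrative): action descriptions are simple lookup
# records keyed by action type, e.g.
#
#     desc = ActionDescription.get_by_type(context, 'nop')
#     print(desc.description)
#
# ('nop' is shown for illustration only; any registered action_type works).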
from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class ActionDescription(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'action_type': wfields.StringField(), 'description': wfields.StringField(), } @base.remotable_classmethod def get(cls, context, action_id): """Find an action description based on its id. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object :param action_id: the id of an action description. :returns: an :class:`ActionDescription` object. """ if utils.is_int_like(action_id): db_action = cls.dbapi.get_action_description_by_id( context, action_id) action = ActionDescription._from_db_object(cls(context), db_action) return action else: raise exception.InvalidIdentity(identity=action_id) @base.remotable_classmethod def get_by_type(cls, context, action_type): """Find an action description based on its action type. :param action_type: the action type of an action description. :param context: Security context :returns: an :class:`ActionDescription` object. """ db_action = cls.dbapi.get_action_description_by_type( context, action_type) action = cls._from_db_object(cls(context), db_action) return action @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of :class:`ActionDescription` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ActionDescription(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`ActionDescription` objects. """ db_actions = cls.dbapi.get_action_description_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_actions] @base.remotable def create(self): """Create an :class:`ActionDescription` record in the DB.""" values = self.obj_get_changes() db_action = self.dbapi.create_action_description(values) self._from_db_object(self, db_action) @base.remotable def save(self): """Save updates to this :class:`ActionDescription`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_action_description(self.id, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() def refresh(self): """Loads updates for this :class:`ActionDescription`. Loads an action description with the same id from the database and checks for updated attributes. Updates are applied from the loaded action description column by column, if there are any updates.
""" current = self.get(self._context, action_id=self.id) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field] def soft_delete(self): """Soft Delete the :class:`ActionDescription` from the DB.""" db_obj = self.dbapi.soft_delete_action_description(self.id) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/action_plan.py0000664000175000017500000003230500000000000022607 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action Plan ` is a flow of :ref:`Actions ` that should be executed in order to satisfy a given :ref:`Goal `. An :ref:`Action Plan ` is generated by Watcher when an :ref:`Audit ` is successful which implies that the :ref:`Strategy ` which was used has found a :ref:`Solution ` to achieve the :ref:`Goal ` of this :ref:`Audit `. In the default implementation of Watcher, an :ref:`Action Plan ` is only composed of successive :ref:`Actions ` (i.e., a Workflow of :ref:`Actions ` belonging to a unique branch). However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) ` composed of two types of Action Item(s): - simple :ref:`Actions `: atomic tasks, which means it can not be split into smaller tasks or commands from an OpenStack point of view. - composite Actions: which are composed of several simple :ref:`Actions ` ordered in sequential and/or parallel flows. An :ref:`Action Plan ` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) `_ or `Unified Modeling Language (UML) `_. An :ref:`Action Plan ` has a life-cycle and its current state may be one of the following: - **RECOMMENDED** : the :ref:`Action Plan ` is waiting for a validation from the :ref:`Administrator ` - **ONGOING** : the :ref:`Action Plan ` is currently being processed by the :ref:`Watcher Applier ` - **SUCCEEDED** : the :ref:`Action Plan ` has been executed successfully (i.e. all :ref:`Actions ` that it contains have been executed successfully) - **FAILED** : an error occurred while executing the :ref:`Action Plan ` - **DELETED** : the :ref:`Action Plan ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. 
- **CANCELLED** : the :ref:`Action Plan ` was in **PENDING** or **ONGOING**
  state and was cancelled by the :ref:`Administrator `
- **SUPERSEDED** : the :ref:`Action Plan ` was in **RECOMMENDED** state and
  was superseded by the :ref:`Administrator `
"""

import datetime

from oslo_utils import timeutils

from watcher.common import exception
from watcher.common import utils
from watcher import conf
from watcher.db import api as db_api
from watcher import notifications
from watcher import objects
from watcher.objects import base
from watcher.objects import fields as wfields

CONF = conf.CONF


class State(object):
    RECOMMENDED = 'RECOMMENDED'
    PENDING = 'PENDING'
    ONGOING = 'ONGOING'
    FAILED = 'FAILED'
    SUCCEEDED = 'SUCCEEDED'
    DELETED = 'DELETED'
    CANCELLED = 'CANCELLED'
    SUPERSEDED = 'SUPERSEDED'
    CANCELLING = 'CANCELLING'


@base.WatcherObjectRegistry.register
class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
                 base.WatcherObjectDictCompat):

    # Version 1.0: Initial version
    # Version 1.1: Added 'audit' and 'strategy' object field
    # Version 1.2: audit_id is not nullable anymore
    # Version 2.0: Removed 'first_action_id' object field
    # Version 2.1: Changed global_efficacy type
    # Version 2.2: Added 'hostname' field
    VERSION = '2.2'

    dbapi = db_api.get_instance()

    fields = {
        'id': wfields.IntegerField(),
        'uuid': wfields.UUIDField(),
        'audit_id': wfields.IntegerField(),
        'strategy_id': wfields.IntegerField(),
        'state': wfields.StringField(nullable=True),
        'global_efficacy': wfields.FlexibleListOfDictField(nullable=True),
        'hostname': wfields.StringField(nullable=True),
        'audit': wfields.ObjectField('Audit', nullable=True),
        'strategy': wfields.ObjectField('Strategy', nullable=True),
    }

    object_fields = {
        'audit': (objects.Audit, 'audit_id'),
        'strategy': (objects.Strategy, 'strategy_id'),
    }

    # Proxified field so we can keep the previous value after an update
    _state = None
    _old_state = None

    # NOTE(v-francoise): The way oslo.versionedobjects works is by using a
    # __new__ that will automatically create the attributes referenced in
    # fields. These attributes are properties that raise an exception if no
    # value has been assigned, which means that they store the actual field
    # value in an "_obj_%(field)s" attribute. So because we want to proxify a
    # value that is already proxified, we have to do what you see below.

    @property
    def _obj_state(self):
        return self._state

    @property
    def _obj_old_state(self):
        return self._old_state

    @property
    def old_state(self):
        return self._old_state

    @_obj_old_state.setter
    def _obj_old_state(self, value):
        self._old_state = value

    @_obj_state.setter
    def _obj_state(self, value):
        if self._old_state is None and self._state is None:
            self._state = value
        else:
            self._old_state, self._state = self._state, value

    @base.remotable_classmethod
    def get(cls, context, action_plan_id, eager=False):
        """Find an action_plan based on its id or uuid and return an :class:`ActionPlan` object.

        :param action_plan_id: the id *or* uuid of an action_plan.
        :param eager: Load object fields if True (Default: False)
        :returns: an :class:`ActionPlan` object.
        """  # noqa: E501
        if utils.is_int_like(action_plan_id):
            return cls.get_by_id(context, action_plan_id, eager=eager)
        elif utils.is_uuid_like(action_plan_id):
            return cls.get_by_uuid(context, action_plan_id, eager=eager)
        else:
            raise exception.InvalidIdentity(identity=action_plan_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, action_plan_id, eager=False):
        """Find an action_plan based on its integer id and return an :class:`ActionPlan` object.

        :param action_plan_id: the id of an action_plan.
        :param eager: Load object fields if True (Default: False)
        :returns: an :class:`ActionPlan` object.
        """  # noqa: E501
        db_action_plan = cls.dbapi.get_action_plan_by_id(
            context, action_plan_id, eager=eager)
        action_plan = cls._from_db_object(
            cls(context), db_action_plan, eager=eager)
        return action_plan

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, eager=False):
        """Find an action_plan based on uuid and return an :class:`ActionPlan` object.

        :param uuid: the uuid of an action_plan.
        :param context: Security context
        :param eager: Load object fields if True (Default: False)
        :returns: an :class:`ActionPlan` object.
        """  # noqa: E501
        db_action_plan = cls.dbapi.get_action_plan_by_uuid(
            context, uuid, eager=eager)
        action_plan = cls._from_db_object(
            cls(context), db_action_plan, eager=eager)
        return action_plan

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None, filters=None,
             sort_key=None, sort_dir=None, eager=False):
        """Return a list of ActionPlan objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param filters: Filters to apply. Defaults to None.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param eager: Load object fields if True (Default: False)
        :returns: a list of :class:`ActionPlan` objects.
        """
        db_action_plans = cls.dbapi.get_action_plan_list(context,
                                                         limit=limit,
                                                         marker=marker,
                                                         filters=filters,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir,
                                                         eager=eager)
        return [cls._from_db_object(cls(context), obj, eager=eager)
                for obj in db_action_plans]

    @base.remotable
    def create(self):
        """Create an :class:`ActionPlan` record in the DB.

        :returns: An :class:`ActionPlan` object.
        """
        values = self.obj_get_changes()
        db_action_plan = self.dbapi.create_action_plan(values)
        # Note(v-francoise): Always load eagerly upon creation so we can send
        # notifications containing information about the related relationships
        self._from_db_object(self, db_action_plan, eager=True)

        def _notify():
            notifications.action_plan.send_create(self._context, self)

        _notify()

    @base.remotable
    def destroy(self):
        """Delete the action plan from the DB"""
        related_efficacy_indicators = objects.EfficacyIndicator.list(
            context=self._context,
            filters={"action_plan_uuid": self.uuid})

        # Cascade delete of related efficacy indicators
        for related_efficacy_indicator in related_efficacy_indicators:
            related_efficacy_indicator.destroy()

        self.dbapi.destroy_action_plan(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self):
        """Save updates to this Action plan.

        Updates will be made column by column based on the result of
        self.what_changed().
        """
        updates = self.obj_get_changes()
        db_obj = self.dbapi.update_action_plan(self.uuid, updates)
        obj = self._from_db_object(
            self.__class__(self._context), db_obj, eager=False)
        self.obj_refresh(obj)

        def _notify():
            notifications.action_plan.send_update(
                self._context, self, old_state=self.old_state)

        _notify()

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, eager=False):
        """Loads updates for this Action plan.

        Loads an action_plan with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded action_plan column by column, if there are any updates.
        :param eager: Load object fields if True (Default: False)
        """
        current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager)
        self.obj_refresh(current)

    @base.remotable
    def soft_delete(self):
        """Soft Delete the Action plan from the DB"""
        related_actions = objects.Action.list(
            context=self._context,
            filters={"action_plan_uuid": self.uuid},
            eager=True)

        # Cascade soft_delete of related actions
        for related_action in related_actions:
            related_action.soft_delete()

        related_efficacy_indicators = objects.EfficacyIndicator.list(
            context=self._context,
            filters={"action_plan_uuid": self.uuid})

        # Cascade soft_delete of related efficacy indicators
        for related_efficacy_indicator in related_efficacy_indicators:
            related_efficacy_indicator.soft_delete()

        self.state = State.DELETED
        self.save()
        db_obj = self.dbapi.soft_delete_action_plan(self.uuid)
        obj = self._from_db_object(
            self.__class__(self._context), db_obj, eager=False)
        self.obj_refresh(obj)

        def _notify():
            notifications.action_plan.send_delete(self._context, self)

        _notify()


class StateManager(object):

    def check_expired(self, context):
        action_plan_expiry = (
            CONF.watcher_decision_engine.action_plan_expiry)
        date_created = timeutils.utcnow() - datetime.timedelta(
            hours=action_plan_expiry)
        filters = {'state__eq': State.RECOMMENDED,
                   'created_at__lt': date_created}
        action_plans = objects.ActionPlan.list(
            context, filters=filters, eager=True)
        for action_plan in action_plans:
            action_plan.state = State.SUPERSEDED
            action_plan.save()

python_watcher-14.0.0/watcher/objects/audit.py

# -*- encoding: utf-8 -*-
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
In the Watcher system, an :ref:`Audit ` is a request for optimizing a
:ref:`Cluster `.

The optimization is done in order to satisfy one :ref:`Goal ` on a given
:ref:`Cluster `.

For each :ref:`Audit `, the Watcher system generates an
:ref:`Action Plan `.

An :ref:`Audit ` has a life-cycle and its current state may be one of the
following:

- **PENDING** : a request for an :ref:`Audit ` has been submitted (either
  manually by the :ref:`Administrator ` or automatically via some event
  handling mechanism) and is in the queue for being processed by the
  :ref:`Watcher Decision Engine `
- **ONGOING** : the :ref:`Audit ` is currently being processed by the
  :ref:`Watcher Decision Engine `
- **SUCCEEDED** : the :ref:`Audit ` has been executed successfully (note
  that it may not necessarily produce a :ref:`Solution `).
- **FAILED** : an error occurred while executing the :ref:`Audit `
- **DELETED** : the :ref:`Audit ` is still stored in the
  :ref:`Watcher database ` but is not returned any more through the Watcher
  APIs.
- **CANCELLED** : the :ref:`Audit ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** state and was suspended by the :ref:`Administrator ` """ import enum from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields class State(object): ONGOING = 'ONGOING' SUCCEEDED = 'SUCCEEDED' FAILED = 'FAILED' CANCELLED = 'CANCELLED' DELETED = 'DELETED' PENDING = 'PENDING' SUSPENDED = 'SUSPENDED' class AuditType(enum.Enum): ONESHOT = 'ONESHOT' CONTINUOUS = 'CONTINUOUS' EVENT = 'EVENT' @base.WatcherObjectRegistry.register class Audit(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'goal' and 'strategy' object field # Version 1.2: Added 'auto_trigger' boolean field # Version 1.3: Added 'next_run_time' DateTime field, # 'interval' type has been changed from Integer to String # Version 1.4: Added 'name' string field # Version 1.5: Added 'hostname' field # Version 1.6: Added 'start_time' and 'end_time' DateTime fields # Version 1.7: Added 'force' boolean field VERSION = '1.7' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'audit_type': wfields.StringField(), 'state': wfields.StringField(), 'parameters': wfields.FlexibleDictField(nullable=True), 'interval': wfields.StringField(nullable=True), 'scope': wfields.FlexibleListOfDictField(nullable=True), 'goal_id': wfields.IntegerField(), 'strategy_id': wfields.IntegerField(nullable=True), 'auto_trigger': wfields.BooleanField(), 'next_run_time': wfields.DateTimeField(nullable=True, tzinfo_aware=False), 'hostname': wfields.StringField(nullable=True), 'start_time': wfields.DateTimeField(nullable=True, tzinfo_aware=False), 'end_time': wfields.DateTimeField(nullable=True, tzinfo_aware=False), 'force': wfields.BooleanField(default=False, nullable=False), 'goal': wfields.ObjectField('Goal', nullable=True), 'strategy': wfields.ObjectField('Strategy', nullable=True), } object_fields = { 'goal': (objects.Goal, 'goal_id'), 'strategy': (objects.Strategy, 'strategy_id'), } def __init__(self, *args, **kwargs): if 'force' not in kwargs: kwargs['force'] = False super(Audit, self).__init__(*args, **kwargs) # Proxified field so we can keep the previous value after an update _state = None _old_state = None # NOTE(v-francoise): The way oslo.versionedobjects works is by using a # __new__ that will automatically create the attributes referenced in # fields. These attributes are properties that raise an exception if no # value has been assigned, which means that they store the actual field # value in an "_obj_%(field)s" attribute. So because we want to proxify a # value that is already proxified, we have to do what you see below. 
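    # Illustrative sketch (not part of the upstream source): because 'state'
    # is proxified through the setters below, the previous value survives an
    # update so notifications can report the transition, e.g.:
    #
    #     audit.state = State.ONGOING    # first assignment; old_state is None
    #     audit.state = State.SUCCEEDED  # now audit.old_state == State.ONGOING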
@property def _obj_state(self): return self._state @property def _obj_old_state(self): return self._old_state @property def old_state(self): return self._old_state @_obj_old_state.setter def _obj_old_state(self, value): self._old_state = value @_obj_state.setter def _obj_state(self, value): if self._old_state is None and self._state is None: self._state = value else: self._old_state, self._state = self._state, value @base.remotable_classmethod def get(cls, context, audit_id, eager=False): """Find a audit based on its id or uuid and return a Audit object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param audit_id: the id *or* uuid of a audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ if utils.is_int_like(audit_id): return cls.get_by_id(context, audit_id, eager=eager) elif utils.is_uuid_like(audit_id): return cls.get_by_uuid(context, audit_id, eager=eager) else: raise exception.InvalidIdentity(identity=audit_id) @base.remotable_classmethod def get_by_id(cls, context, audit_id, eager=False): """Find a audit based on its integer id and return a Audit object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param audit_id: the id of a audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ db_audit = cls.dbapi.get_audit_by_id(context, audit_id, eager=eager) audit = cls._from_db_object(cls(context), db_audit, eager=eager) return audit @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find a audit based on uuid and return a :class:`Audit` object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param uuid: the uuid of a audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ db_audit = cls.dbapi.get_audit_by_uuid(context, uuid, eager=eager) audit = cls._from_db_object(cls(context), db_audit, eager=eager) return audit @base.remotable_classmethod def get_by_name(cls, context, name, eager=False): """Find an audit based on name and return a :class:`Audit` object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param name: the name of an audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ db_audit = cls.dbapi.get_audit_by_name(context, name, eager=eager) audit = cls._from_db_object(cls(context), db_audit, eager=eager) return audit @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of Audit objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. 
Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply. Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`Audit` object. """ db_audits = cls.dbapi.get_audit_list(context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_audits] @base.remotable def create(self): """Create an :class:`Audit` record in the DB. :returns: An :class:`Audit` object. """ values = self.obj_get_changes() db_audit = self.dbapi.create_audit(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_audit, eager=True) def _notify(): notifications.audit.send_create(self._context, self) _notify() @base.remotable def destroy(self): """Delete the Audit from the DB.""" self.dbapi.destroy_audit(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this Audit. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_audit(self.uuid, updates) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) def _notify(): notifications.audit.send_update( self._context, self, old_state=self.old_state) _notify() self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this Audit. Loads a audit with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded audit column by column, if there are any updates. :param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the Audit from the DB.""" self.state = State.DELETED self.save() db_obj = self.dbapi.soft_delete_audit(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) def _notify(): notifications.audit.send_delete(self._context, self) _notify() class AuditStateTransitionManager(object): TRANSITIONS = { State.PENDING: [State.ONGOING, State.CANCELLED], State.ONGOING: [State.FAILED, State.SUCCEEDED, State.CANCELLED, State.SUSPENDED], State.FAILED: [State.DELETED], State.SUCCEEDED: [State.DELETED], State.CANCELLED: [State.DELETED], State.SUSPENDED: [State.ONGOING, State.DELETED], } INACTIVE_STATES = (State.CANCELLED, State.DELETED, State.FAILED, State.SUSPENDED) def check_transition(self, initial, new): return new in self.TRANSITIONS.get(initial, []) def is_inactive(self, audit): return audit.state in self.INACTIVE_STATES ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/audit_template.py0000664000175000017500000002420600000000000023322 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Audit ` may be launched several times with the same settings (:ref:`Goal `, thresholds, ...). Therefore it makes sense to save those settings in some sort of Audit preset object, which is known as an :ref:`Audit Template `. An :ref:`Audit Template ` contains at least the :ref:`Goal ` of the :ref:`Audit `. It may also contain some error handling settings indicating whether: - :ref:`Watcher Applier ` stops the entire operation - :ref:`Watcher Applier ` performs a rollback and how many retries should be attempted before failure occurs (also the latter can be complex: for example the scenario in which there are many first-time failures on ultimately successful :ref:`Actions `). Moreover, an :ref:`Audit Template ` may contain some settings related to the level of automation for the :ref:`Action Plan ` that will be generated by the :ref:`Audit `. A flag will indicate whether the :ref:`Action Plan ` will be launched automatically or will need a manual confirmation from the :ref:`Administrator `. Last but not least, an :ref:`Audit Template ` may contain a list of extra parameters related to the :ref:`Strategy ` configuration. These parameters can be provided as a list of key-value pairs. """ from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class AuditTemplate(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'goal' and 'strategy' object field VERSION = '1.1' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'description': wfields.StringField(nullable=True), 'scope': wfields.FlexibleListOfDictField(nullable=True), 'goal_id': wfields.IntegerField(), 'strategy_id': wfields.IntegerField(nullable=True), 'goal': wfields.ObjectField('Goal', nullable=True), 'strategy': wfields.ObjectField('Strategy', nullable=True), } object_fields = { 'goal': (objects.Goal, 'goal_id'), 'strategy': (objects.Strategy, 'strategy_id'), } @base.remotable_classmethod def get(cls, context, audit_template_id, eager=False): """Find an audit template based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param audit_template_id: the id *or* uuid of a audit_template. :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. 
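
        Example (illustrative only; ``ctx`` and ``some_uuid`` are
        placeholders for a valid security context and template UUID)::

            tpl = AuditTemplate.get(ctx, 42)         # by integer id
            tpl = AuditTemplate.get(ctx, some_uuid)  # or by UUID string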
""" if utils.is_int_like(audit_template_id): return cls.get_by_id(context, audit_template_id, eager=eager) elif utils.is_uuid_like(audit_template_id): return cls.get_by_uuid(context, audit_template_id, eager=eager) else: raise exception.InvalidIdentity(identity=audit_template_id) @base.remotable_classmethod def get_by_id(cls, context, audit_template_id, eager=False): """Find an audit template based on its integer id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param audit_template_id: the id of a audit_template. :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. """ db_audit_template = cls.dbapi.get_audit_template_by_id( context, audit_template_id, eager=eager) audit_template = cls._from_db_object( cls(context), db_audit_template, eager=eager) return audit_template @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find an audit template based on uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param uuid: the uuid of a audit_template. :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. """ db_audit_template = cls.dbapi.get_audit_template_by_uuid( context, uuid, eager=eager) audit_template = cls._from_db_object( cls(context), db_audit_template, eager=eager) return audit_template @base.remotable_classmethod def get_by_name(cls, context, name, eager=False): """Find an audit template based on name :param name: the logical name of a audit_template. :param context: Security context :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. """ db_audit_template = cls.dbapi.get_audit_template_by_name( context, name, eager=eager) audit_template = cls._from_db_object( cls(context), db_audit_template, eager=eager) return audit_template @base.remotable_classmethod def list(cls, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Return a list of :class:`AuditTemplate` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`AuditTemplate` object. """ db_audit_templates = cls.dbapi.get_audit_template_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_audit_templates] @base.remotable def create(self): """Create a :class:`AuditTemplate` record in the DB :returns: An :class:`AuditTemplate` object. 
""" values = self.obj_get_changes() db_audit_template = self.dbapi.create_audit_template(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_audit_template, eager=True) def destroy(self): """Delete the :class:`AuditTemplate` from the DB""" self.dbapi.destroy_audit_template(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this :class:`AuditTemplate`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_audit_template(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this :class:`AuditTemplate`. Loads a audit_template with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded audit_template column by column, if there are any updates. :param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the :class:`AuditTemplate` from the DB""" db_obj = self.dbapi.soft_delete_audit_template(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/base.py0000664000175000017500000001462200000000000021234 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher common internal object model""" from oslo_utils import versionutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import fields as ovo_fields from watcher import objects remotable_classmethod = ovo_base.remotable_classmethod remotable = ovo_base.remotable def get_attrname(name): """Return the mangled name of the attribute's underlying storage.""" # FIXME(danms): This is just until we use o.vo's class properties # and object base. return '_obj_' + name class WatcherObjectRegistry(ovo_base.VersionedObjectRegistry): notification_classes = [] def registration_hook(self, cls, index): # NOTE(danms): This is called when an object is registered, # and is responsible for maintaining watcher.objects.$OBJECT # as the highest-versioned implementation of a given object. version = versionutils.convert_version_to_tuple(cls.VERSION) if not hasattr(objects, cls.obj_name()): setattr(objects, cls.obj_name(), cls) else: cur_version = versionutils.convert_version_to_tuple( getattr(objects, cls.obj_name()).VERSION) if version >= cur_version: setattr(objects, cls.obj_name(), cls) @classmethod def register_notification(cls, notification_cls): """Register a class as notification. 
Use only to register concrete notification or payload classes, do not register base classes intended for inheritance only. """ cls.register_if(False)(notification_cls) cls.notification_classes.append(notification_cls) return notification_cls @classmethod def register_notification_objects(cls): """Register previously decorated notification as normal ovos. This is not intended for production use but only for testing and document generation purposes. """ for notification_cls in cls.notification_classes: cls.register(notification_cls) class WatcherObject(ovo_base.VersionedObject): """Base class and object factory. This forms the base of all objects that can be remoted or instantiated via RPC. Simply defining a class that inherits from this base class will make it remotely instantiatable. Objects should implement the necessary "get" classmethod routines as well as "save" object methods as appropriate. """ OBJ_SERIAL_NAMESPACE = 'watcher_object' OBJ_PROJECT_NAMESPACE = 'watcher' def as_dict(self): return { k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)} class WatcherObjectDictCompat(ovo_base.VersionedObjectDictCompat): pass class WatcherComparableObject(ovo_base.ComparableVersionedObject): pass class WatcherPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for all persistent objects. """ fields = { 'created_at': ovo_fields.DateTimeField(nullable=True), 'updated_at': ovo_fields.DateTimeField(nullable=True), 'deleted_at': ovo_fields.DateTimeField(nullable=True), } # Mapping between the object field name and a 2-tuple pair composed of # its object type (e.g. objects.RelatedObject) and the name of the # model field related ID (or UUID) foreign key field. # e.g.: # # fields = { # # [...] # 'related_object_id': fields.IntegerField(), # Foreign key # 'related_object': wfields.ObjectField('RelatedObject'), # } # {'related_object': (objects.RelatedObject, 'related_object_id')} object_fields = {} def obj_refresh(self, loaded_object): """Applies updates for objects that inherit from base.WatcherObject. Checks for updated attributes in an object. Updates are applied from the loaded object column by column in comparison with the current object. """ fields = (field for field in self.fields if field not in self.object_fields) for field in fields: if (self.obj_attr_is_set(field) and self[field] != loaded_object[field]): self[field] = loaded_object[field] @staticmethod def _from_db_object(obj, db_object, eager=False): """Converts a database entity to a formal object. :param obj: An object of the class. 
:param db_object: A DB model of the object :param eager: Enable the loading of object fields (Default: False) :return: The object of the class with the database entity added """ obj_class = type(obj) object_fields = obj_class.object_fields for field in obj.fields: if field not in object_fields: obj[field] = db_object[field] if eager: # Load object fields context = obj._context loadable_fields = ( (obj_field, related_obj_cls, rel_id) for obj_field, (related_obj_cls, rel_id) in object_fields.items() if obj[rel_id] ) for obj_field, related_obj_cls, rel_id in loadable_fields: if getattr(db_object, obj_field, None) and obj[rel_id]: # The object field data was eagerly loaded alongside # the main object data obj[obj_field] = related_obj_cls._from_db_object( related_obj_cls(context), db_object[obj_field]) else: # The object field data wasn't loaded yet obj[obj_field] = related_obj_cls.get(context, obj[rel_id]) obj.obj_reset_changes() return obj class WatcherObjectSerializer(ovo_base.VersionedObjectSerializer): # Base class to use for object hydration OBJ_BASE_CLASS = WatcherObject ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/efficacy_indicator.py0000664000175000017500000001664400000000000024135 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class EfficacyIndicator(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'action_plan_id': wfields.IntegerField(), 'name': wfields.StringField(), 'description': wfields.StringField(nullable=True), 'unit': wfields.StringField(nullable=True), 'value': wfields.NumericField(), } @base.remotable_classmethod def get(cls, context, efficacy_indicator_id): """Find an efficacy indicator object given its ID or UUID :param efficacy_indicator_id: the ID or UUID of an efficacy indicator. :returns: a :class:`EfficacyIndicator` object. """ if utils.is_int_like(efficacy_indicator_id): return cls.get_by_id(context, efficacy_indicator_id) elif utils.is_uuid_like(efficacy_indicator_id): return cls.get_by_uuid(context, efficacy_indicator_id) else: raise exception.InvalidIdentity(identity=efficacy_indicator_id) @base.remotable_classmethod def get_by_id(cls, context, efficacy_indicator_id): """Find an efficacy indicator given its integer ID :param efficacy_indicator_id: the id of an efficacy indicator. :returns: a :class:`EfficacyIndicator` object. 
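
        Example (illustrative only; assumes an indicator with id 7 exists
        and ``ctx`` is a valid security context)::

            indicator = EfficacyIndicator.get_by_id(ctx, 7)
            print(indicator.name, indicator.value, indicator.unit)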
""" db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_id( context, efficacy_indicator_id) efficacy_indicator = EfficacyIndicator._from_db_object( cls(context), db_efficacy_indicator) return efficacy_indicator @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find an efficacy indicator given its UUID :param uuid: the uuid of an efficacy indicator. :param context: Security context :returns: a :class:`EfficacyIndicator` object. """ db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_uuid( context, uuid) efficacy_indicator = EfficacyIndicator._from_db_object( cls(context), db_efficacy_indicator) return efficacy_indicator @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of EfficacyIndicator objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply. Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`EfficacyIndicator` object. """ db_efficacy_indicators = cls.dbapi.get_efficacy_indicator_list( context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_efficacy_indicators] @base.remotable def create(self, context=None): """Create a EfficacyIndicator record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ values = self.obj_get_changes() db_efficacy_indicator = self.dbapi.create_efficacy_indicator(values) self._from_db_object(self, db_efficacy_indicator) def destroy(self, context=None): """Delete the EfficacyIndicator from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ self.dbapi.destroy_efficacy_indicator(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this EfficacyIndicator. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ updates = self.obj_get_changes() self.dbapi.update_efficacy_indicator(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this EfficacyIndicator. Loads an efficacy indicator with the same uuid from the database and checks for updated attributes. Updates are applied to the loaded efficacy indicator column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) self.obj_refresh(current) @base.remotable def soft_delete(self, context=None): """Soft Delete the efficacy indicator from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) """ self.dbapi.soft_delete_efficacy_indicator(self.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/fields.py0000664000175000017500000001121300000000000021561 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for objects""" import ast from oslo_serialization import jsonutils from oslo_versionedobjects import fields BaseEnumField = fields.BaseEnumField BooleanField = fields.BooleanField DateTimeField = fields.DateTimeField Enum = fields.Enum FloatField = fields.FloatField IntegerField = fields.IntegerField ListOfStringsField = fields.ListOfStringsField NonNegativeFloatField = fields.NonNegativeFloatField NonNegativeIntegerField = fields.NonNegativeIntegerField ObjectField = fields.ObjectField StringField = fields.StringField UnspecifiedDefault = fields.UnspecifiedDefault class UUIDField(fields.UUIDField): def coerce(self, obj, attr, value): if value is None or value == "": return self._null(obj, attr) else: return self._type.coerce(obj, attr, value) class Numeric(fields.FieldType): @staticmethod def coerce(obj, attr, value): if value is None: return value f_value = float(value) return f_value if not f_value.is_integer() else value class NumericField(fields.AutoTypedField): AUTO_TYPE = Numeric() class DictField(fields.AutoTypedField): AUTO_TYPE = fields.Dict(fields.FieldType()) class ListOfUUIDsField(fields.AutoTypedField): AUTO_TYPE = fields.List(fields.UUID()) class FlexibleDict(fields.FieldType): @staticmethod def coerce(obj, attr, value): if isinstance(value, str): value = ast.literal_eval(value) return dict(value) class FlexibleDictField(fields.AutoTypedField): AUTO_TYPE = FlexibleDict() # TODO(lucasagomes): In our code we've always translated None to {}, # this method makes this field to work like this. But probably won't # be accepted as-is in the oslo_versionedobjects library def _null(self, obj, attr): if self.nullable: return {} super(FlexibleDictField, self)._null(obj, attr) class FlexibleListOfDict(fields.FieldType): @staticmethod def coerce(obj, attr, value): if isinstance(value, str): value = ast.literal_eval(value) return list(value) class FlexibleListOfDictField(fields.AutoTypedField): AUTO_TYPE = FlexibleListOfDict() # TODO(lucasagomes): In our code we've always translated None to {}, # this method makes this field to work like this. 
But probably won't # be accepted as-is in the oslo_versionedobjects library def _null(self, obj, attr): if self.nullable: return [] super(FlexibleListOfDictField, self)._null(obj, attr) class Json(fields.FieldType): def coerce(self, obj, attr, value): if isinstance(value, str): loaded = jsonutils.loads(value) return loaded return value def from_primitive(self, obj, attr, value): return self.coerce(obj, attr, value) def to_primitive(self, obj, attr, value): return jsonutils.dumps(value) class JsonField(fields.AutoTypedField): AUTO_TYPE = Json() # ### Notification fields ### # class BaseWatcherEnum(Enum): ALL = () def __init__(self, **kwargs): super(BaseWatcherEnum, self).__init__(valid_values=self.__class__.ALL) class NotificationPriority(BaseWatcherEnum): DEBUG = 'debug' INFO = 'info' WARNING = 'warning' ERROR = 'error' CRITICAL = 'critical' ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) class NotificationPhase(BaseWatcherEnum): START = 'start' END = 'end' ERROR = 'error' ALL = (START, END, ERROR) class NotificationAction(BaseWatcherEnum): CREATE = 'create' UPDATE = 'update' EXCEPTION = 'exception' DELETE = 'delete' STRATEGY = 'strategy' PLANNER = 'planner' EXECUTION = 'execution' CANCEL = 'cancel' ALL = (CREATE, UPDATE, EXCEPTION, DELETE, STRATEGY, PLANNER, EXECUTION, CANCEL) class NotificationPriorityField(BaseEnumField): AUTO_TYPE = NotificationPriority() class NotificationPhaseField(BaseEnumField): AUTO_TYPE = NotificationPhase() class NotificationActionField(BaseEnumField): AUTO_TYPE = NotificationAction() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/goal.py0000664000175000017500000001516500000000000021247 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class Goal(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'efficacy_specification': wfields.FlexibleListOfDictField(), } @base.remotable_classmethod def get(cls, context, goal_id): """Find a goal based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param goal_id: the id *or* uuid of a goal. :returns: a :class:`Goal` object. 
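
        Example (illustrative only; ``ctx`` and ``goal_id_or_uuid`` are
        placeholders)::

            goal = Goal.get(ctx, goal_id_or_uuid)  # accepts an id or a UUID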
""" if utils.is_int_like(goal_id): return cls.get_by_id(context, goal_id) elif utils.is_uuid_like(goal_id): return cls.get_by_uuid(context, goal_id) else: raise exception.InvalidIdentity(identity=goal_id) @base.remotable_classmethod def get_by_id(cls, context, goal_id): """Find a goal based on its integer id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param goal_id: the id *or* uuid of a goal. :returns: a :class:`Goal` object. """ db_goal = cls.dbapi.get_goal_by_id(context, goal_id) goal = cls._from_db_object(cls(context), db_goal) return goal @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a goal based on uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param uuid: the uuid of a goal. :returns: a :class:`Goal` object. """ db_goal = cls.dbapi.get_goal_by_uuid(context, uuid) goal = cls._from_db_object(cls(context), db_goal) return goal @base.remotable_classmethod def get_by_name(cls, context, name): """Find a goal based on name :param name: the name of a goal. :param context: Security context :returns: a :class:`Goal` object. """ db_goal = cls.dbapi.get_goal_by_name(context, name) goal = cls._from_db_object(cls(context), db_goal) return goal @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of :class:`Goal` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`Goal` object. """ db_goals = cls.dbapi.get_goal_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_goals] @base.remotable def create(self): """Create a :class:`Goal` record in the DB""" values = self.obj_get_changes() db_goal = self.dbapi.create_goal(values) self._from_db_object(self, db_goal) def destroy(self): """Delete the :class:`Goal` from the DB""" self.dbapi.destroy_goal(self.id) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this :class:`Goal`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_goal(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() @base.remotable def refresh(self): """Loads updates for this :class:`Goal`. Loads a goal with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded goal column by column, if there are any updates. 
""" current = self.get_by_uuid(self._context, uuid=self.uuid) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the :class:`Goal` from the DB""" db_obj = self.dbapi.soft_delete_goal(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/scoring_engine.py0000664000175000017500000002040200000000000023304 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Scoring Engine ` is an instance of a data model, to which a learning data was applied. Because there might be multiple algorithms used to build a particular data model (and therefore a scoring engine), the usage of scoring engine might vary. A metainfo field is supposed to contain any information which might be needed by the user of a given scoring engine. """ from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class ScoringEngine(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'description': wfields.StringField(nullable=True), 'metainfo': wfields.StringField(nullable=True), } @base.remotable_classmethod def get(cls, context, scoring_engine_id): """Find a scoring engine based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_name: the name of a scoring_engine. :returns: a :class:`ScoringEngine` object. """ if utils.is_int_like(scoring_engine_id): return cls.get_by_id(context, scoring_engine_id) elif utils.is_uuid_like(scoring_engine_id): return cls.get_by_uuid(context, scoring_engine_id) else: raise exception.InvalidIdentity(identity=scoring_engine_id) @base.remotable_classmethod def get_by_id(cls, context, scoring_engine_id): """Find a scoring engine based on its id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_id: the id of a scoring_engine. :returns: a :class:`ScoringEngine` object. 
""" db_scoring_engine = cls.dbapi.get_scoring_engine_by_id( context, scoring_engine_id) scoring_engine = ScoringEngine._from_db_object(cls(context), db_scoring_engine) return scoring_engine @base.remotable_classmethod def get_by_uuid(cls, context, scoring_engine_uuid): """Find a scoring engine based on its uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_uuid: the uuid of a scoring_engine. :returns: a :class:`ScoringEngine` object. """ db_scoring_engine = cls.dbapi.get_scoring_engine_by_uuid( context, scoring_engine_uuid) scoring_engine = ScoringEngine._from_db_object(cls(context), db_scoring_engine) return scoring_engine @base.remotable_classmethod def get_by_name(cls, context, scoring_engine_name): """Find a scoring engine based on its name :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_name: the name of a scoring_engine. :returns: a :class:`ScoringEngine` object. """ db_scoring_engine = cls.dbapi.get_scoring_engine_by_name( context, scoring_engine_name) scoring_engine = ScoringEngine._from_db_object(cls(context), db_scoring_engine) return scoring_engine @base.remotable_classmethod def list(cls, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Return a list of :class:`ScoringEngine` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`ScoringEngine` objects. """ db_scoring_engines = cls.dbapi.get_scoring_engine_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_scoring_engines] @base.remotable def create(self): """Create a :class:`ScoringEngine` record in the DB.""" values = self.obj_get_changes() db_scoring_engine = self.dbapi.create_scoring_engine(values) self._from_db_object(self, db_scoring_engine) def destroy(self): """Delete the :class:`ScoringEngine` from the DB""" self.dbapi.destroy_scoring_engine(self.id) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this :class:`ScoringEngine`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_scoring_engine(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() def refresh(self): """Loads updates for this :class:`ScoringEngine`. Loads a scoring_engine with the same id from the database and checks for updated attributes. 
Updates are applied from the loaded scoring_engine column by column, if there are any updates. """ current = self.get_by_id(self._context, scoring_engine_id=self.id) self.obj_refresh(current) def soft_delete(self): """Soft Delete the :class:`ScoringEngine` from the DB""" db_obj = self.dbapi.soft_delete_scoring_engine(self.id) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/service.py0000664000175000017500000001244700000000000021765 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields class ServiceStatus(object): ACTIVE = 'ACTIVE' FAILED = 'FAILED' @base.WatcherObjectRegistry.register class Service(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'name': wfields.StringField(), 'host': wfields.StringField(), 'last_seen_up': wfields.DateTimeField( tzinfo_aware=False, nullable=True), } @base.remotable_classmethod def get(cls, context, service_id): """Find a service based on its id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Service(context) :param service_id: the id of a service. :returns: a :class:`Service` object. """ if utils.is_int_like(service_id): db_service = cls.dbapi.get_service_by_id(context, service_id) service = Service._from_db_object(cls(context), db_service) return service else: raise exception.InvalidIdentity(identity=service_id) @base.remotable_classmethod def get_by_name(cls, context, name): """Find a service based on name :param name: the name of a service. :param context: Security context :returns: a :class:`Service` object. """ db_service = cls.dbapi.get_service_by_name(context, name) service = cls._from_db_object(cls(context), db_service) return service @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of :class:`Service` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Service(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. 
:param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`Service` objects. """ db_services = cls.dbapi.get_service_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_services] @base.remotable def create(self): """Create a :class:`Service` record in the DB.""" values = self.obj_get_changes() db_service = self.dbapi.create_service(values) self._from_db_object(self, db_service) @base.remotable def save(self): """Save updates to this :class:`Service`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_service(self.id, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() def refresh(self): """Loads updates for this :class:`Service`. Loads a service with the same id from the database and checks for updated attributes. Updates are applied from the loaded service column by column, if there are any updates. """ current = self.get(self._context, service_id=self.id) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field] def soft_delete(self): """Soft Delete the :class:`Service` from the DB.""" db_obj = self.dbapi.soft_delete_service(self.id) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/objects/strategy.py0000664000175000017500000002363400000000000022167 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class Strategy(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added Goal object field VERSION = '1.1' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'goal_id': wfields.IntegerField(), 'parameters_spec': wfields.FlexibleDictField(nullable=True), 'goal': wfields.ObjectField('Goal', nullable=True), } object_fields = {'goal': (objects.Goal, 'goal_id')} @base.remotable_classmethod def get(cls, context, strategy_id, eager=False): """Find a strategy based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it.
A context should be set when instantiating the object, e.g.: Strategy(context) :param strategy_id: the id *or* uuid of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. """ if utils.is_int_like(strategy_id): return cls.get_by_id(context, strategy_id, eager=eager) elif utils.is_uuid_like(strategy_id): return cls.get_by_uuid(context, strategy_id, eager=eager) else: raise exception.InvalidIdentity(identity=strategy_id) @base.remotable_classmethod def get_by_id(cls, context, strategy_id, eager=False): """Find a strategy based on its integer id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param strategy_id: the id of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. """ db_strategy = cls.dbapi.get_strategy_by_id( context, strategy_id, eager=eager) strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) return strategy @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find a strategy based on uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param uuid: the uuid of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. """ db_strategy = cls.dbapi.get_strategy_by_uuid( context, uuid, eager=eager) strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) return strategy @base.remotable_classmethod def get_by_name(cls, context, name, eager=False): """Find a strategy based on name :param context: Security context :param name: the name of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. """ db_strategy = cls.dbapi.get_strategy_by_name( context, name, eager=eager) strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) return strategy @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of :class:`Strategy` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: dict mapping the filter key to a value. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`Strategy` objects. """ db_strategies = cls.dbapi.get_strategy_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_strategies] @base.remotable def create(self, context=None): """Create a :class:`Strategy` record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api.
Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :returns: A :class:`Strategy` object. """ values = self.obj_get_changes() db_strategy = self.dbapi.create_strategy(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_strategy, eager=True) def destroy(self, context=None): """Delete the :class:`Strategy` from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) """ self.dbapi.destroy_strategy(self.id) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this :class:`Strategy`. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) """ updates = self.obj_get_changes() self.dbapi.update_strategy(self.id, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None, eager=False): """Loads updates for this :class:`Strategy`. Loads a strategy with the same id from the database and checks for updated attributes. Updates are applied from the loaded strategy column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param eager: Load object fields if True (Default: False) """ current = self.__class__.get_by_id( self._context, strategy_id=self.id, eager=eager) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field] @base.remotable def soft_delete(self, context=None): """Soft Delete the :class:`Strategy` from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) """ self.dbapi.soft_delete_strategy(self.id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/tests/0000775000175000017500000000000000000000000017454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/__init__.py0000664000175000017500000000251000000000000021563 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(sean-k-mooney): watcher does not currently split up the tests that # need eventlet from those that do not, so we need to monkey patch all the # tests. As an example, the watcher.tests.cmd module imports watcher.cmd, # which has the side effect of monkey patching the test executor # after many modules are already imported. from watcher import eventlet eventlet.patch() from watcher import objects # noqa E402 # NOTE(comstud): Make sure we have all of the objects loaded. We do this # at module import time, because we may be using mock decorators in our # tests that run at import time. objects.register_all() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6391354 python_watcher-14.0.0/watcher/tests/api/0000775000175000017500000000000000000000000020225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/__init__.py0000664000175000017500000000000000000000000022324 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/base.py0000664000175000017500000002730300000000000021516 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for API tests.""" # NOTE: Ported from ceilometer/tests/api.py (subsequently moved to # ceilometer/tests/api/__init__.py). This should be oslo'ified: # https://bugs.launchpad.net/watcher/+bug/1255115. # NOTE(deva): import auth_token so we can override a config option import copy from unittest import mock from urllib import parse as urlparse from oslo_config import cfg import pecan import pecan.testing from watcher.api import hooks from watcher.common import context as watcher_context from watcher.notifications import service as n_service from watcher.tests.db import base PATH_PREFIX = '/v1' class FunctionalTest(base.DbTestCase): """Pecan controller functional testing class. Used for functional tests of Pecan controllers where you need to test your literal application and its integration with the framework.
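    A minimal sketch of a typical subclass (the test body is illustrative;
    test_root.py contains a real equivalent)::

        class TestExample(FunctionalTest):
            def test_get_root(self):
                # path_prefix='' bypasses the default /v1 prefix
                data = self.get_json('/', path_prefix='')
                self.assertEqual('v1', data['default_version']['id'])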
""" SOURCE_DATA = {'test_source': {'somekey': '666'}} def setUp(self): super(FunctionalTest, self).setUp() cfg.CONF.set_override("auth_version", "v2.0", group='keystone_authtoken') cfg.CONF.set_override("admin_user", "admin", group='keystone_authtoken') p_services = mock.patch.object(n_service, "send_service_update", new_callable=mock.PropertyMock) self.m_services = p_services.start() self.addCleanup(p_services.stop) self.app = self._make_app() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def _make_app(self, enable_acl=False): # Determine where we are so we can set up paths in the config root_dir = self.get_path() self.config = { 'app': { 'root': 'watcher.api.controllers.root.RootController', 'modules': ['watcher.api'], 'hooks': [ hooks.ContextHook(), hooks.NoExceptionTracebackHook() ], 'template_path': '%s/api/templates' % root_dir, 'enable_acl': enable_acl, 'acl_public_routes': ['/', '/v1'], }, } return pecan.testing.load_test_app(self.config) def _request_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) return response def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post(self, *args, **kwargs): headers = kwargs.pop('headers', {}) headers.setdefault('Accept', 'application/json') kwargs['headers'] = headers return self.app.post(*args, **kwargs) def post_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. 
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="post") def patch_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PATCH request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="patch") def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP DELETE request to Pecan test app. :param path: url path of target service :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=[], path_prefix=PATH_PREFIX, return_json=True, **params): """Sends simulated HTTP GET request to Pecan test app. 
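        Each entry in ``q`` is expanded into ``q.field``, ``q.op`` and
        ``q.value`` query arguments, e.g. (the filter shown is
        illustrative)::

            response = self.get_json(
                '/audits',
                q=[{'field': 'state', 'op': 'eq', 'value': 'PENDING'}])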
:param path: url path of target service :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param path_prefix: prefix of the url path :param params: content for wsgi.input of request """ full_path = path_prefix + path query_params = {'q.field': [], 'q.value': [], 'q.op': [], } for query in q: for name in ['field', 'op', 'value']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors) if return_json and not expect_errors: response = response.json return response def validate_link(self, link, bookmark=False): """Checks if the given link can get correct data.""" # removes the scheme and net location parts of the link url_parts = list(urlparse.urlparse(link)) url_parts[0] = url_parts[1] = '' # bookmark link should not have the version in the URL if bookmark and url_parts[2].startswith(PATH_PREFIX): return False full_path = urlparse.urlunparse(url_parts) try: self.get_json(full_path, path_prefix='') return True except Exception: return False class AdminRoleTest(base.DbTestCase): def setUp(self): super(AdminRoleTest, self).setUp() token_info = { 'token': { 'project': { 'id': 'admin' }, 'user': { 'id': 'admin' } } } self.context = watcher_context.RequestContext( auth_token_info=token_info, project_id='admin', user_id='admin') def make_context(*args, **kwargs): # If context hasn't been constructed with token_info if not kwargs.get('auth_token_info'): kwargs['auth_token_info'] = copy.deepcopy(token_info) if not kwargs.get('project_id'): kwargs['project_id'] = 'admin' if not kwargs.get('user_id'): kwargs['user_id'] = 'admin' if not kwargs.get('roles'): kwargs['roles'] = ['admin'] context = watcher_context.RequestContext(*args, **kwargs) return watcher_context.RequestContext.from_dict(context.to_dict()) p = mock.patch.object(watcher_context, 'make_context', side_effect=make_context) self.mock_make_context = p.start() self.addCleanup(p.stop) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/test_base.py0000664000175000017500000000225500000000000022554 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from http import HTTPStatus from watcher.tests.api import base class TestBase(base.FunctionalTest): def test_api_setup(self): pass def test_bad_uri(self): response = self.get_json('/bad/path', expect_errors=True, headers={"Accept": "application/json"}) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/test_config.py0000664000175000017500000000253100000000000023104 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib from oslo_config import cfg from watcher.api import config as api_config from watcher.tests.api import base class TestRoot(base.FunctionalTest): def test_config_enable_webhooks_auth(self): acl_public_routes = ['/'] cfg.CONF.set_override('enable_webhooks_auth', True, 'api') importlib.reload(api_config) self.assertEqual(acl_public_routes, api_config.app['acl_public_routes']) def test_config_disable_webhooks_auth(self): acl_public_routes = ['/', '/v1/webhooks/.*'] cfg.CONF.set_override('enable_webhooks_auth', False, 'api') importlib.reload(api_config) self.assertEqual(acl_public_routes, api_config.app['acl_public_routes']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/test_hooks.py0000664000175000017500000002430500000000000022765 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the Pecan API hooks.""" from http import client as http_client from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from unittest import mock from watcher.api.controllers import root from watcher.api import hooks from watcher.common import context from watcher.tests.api import base class FakeRequest(object): def __init__(self, headers, context, environ): self.headers = headers self.context = context self.environ = environ or {} self.version = (1, 0) self.host_url = 'http://127.0.0.1:6385' class FakeRequestState(object): def __init__(self, headers=None, context=None, environ=None): self.request = FakeRequest(headers, context, environ) self.response = FakeRequest(headers, context, environ) def set_context(self): headers = self.request.headers creds = { 'user': headers.get('X-User') or headers.get('X-User-Id'), 'domain_id': headers.get('X-User-Domain-Id'), 'domain_name': headers.get('X-User-Domain-Name'), 'auth_token': headers.get('X-Auth-Token'), 'roles': headers.get('X-Roles', '').split(','), } is_admin = ('admin' in creds['roles'] or 'administrator' in creds['roles']) is_public_api = self.request.environ.get('is_public_api', False) self.request.context = context.RequestContext( is_admin=is_admin, is_public_api=is_public_api, **creds) def fake_headers(admin=False): headers = { 'X-Auth-Token': '8d9f235ca7464dd7ba46f81515797ea0', 'X-Domain-Id': 'None', 'X-Domain-Name': 'None', 'X-Project-Domain-Id': 'default', 'X-Project-Domain-Name': 'Default', 'X-Role': '_member_,admin', 'X-Roles': '_member_,admin', # 'X-Tenant': 'foo', # 'X-Tenant-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', # 'X-Tenant-Name': 'foo', 'X-User': 'foo', 'X-User-Domain-Id': 'default', 'X-User-Domain-Name': 'Default', 'X-User-Id': '604ab2a197c442c2a84aba66708a9e1e', 'X-User-Name': 'foo', } if admin: headers.update({ 'X-Project-Name': 'admin', 'X-Role': '_member_,admin', 'X-Roles': '_member_,admin', 'X-Tenant': 'admin', # 'X-Tenant-Name': 'admin', # 'X-Tenant': 'admin' 'X-Tenant-Name': 'admin', 'X-Tenant-Id': 'c2a3a69d456a412376efdd9dac38', 'X-Project-Id': 'c2a3a69d456a412376efdd9dac38', }) else: headers.update({ 'X-Role': '_member_', 'X-Roles': '_member_', 'X-Tenant': 'foo', 'X-Tenant-Name': 'foo', 'X-Tenant-Id': 'b4efa69d,4ffa4973863f2eefc094f7f8', 'X-Project-Name': 'foo', 'X-Project-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', }) return headers class TestNoExceptionTracebackHook(base.FunctionalTest): TRACE = ['Traceback (most recent call last):', ' File "/opt/stack/watcher/watcher/common/rpc/amqp.py",' ' line 434, in _process_data\\n **args)', ' File "/opt/stack/watcher/watcher/common/rpc/' 'dispatcher.py", line 172, in dispatch\\n result =' ' getattr(proxyobj, method)(ctxt, **kwargs)'] MSG_WITHOUT_TRACE = "Test exception message." 
MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) def setUp(self): super(TestNoExceptionTracebackHook, self).setUp() p = mock.patch.object(root.Root, 'convert') self.root_convert_mock = p.start() self.addCleanup(p.stop) cfg.CONF.set_override('debug', False) def test_hook_exception_success(self): self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) def test_hook_remote_error_success(self): test_exc_type = 'TestException' self.root_convert_mock.side_effect = messaging.rpc.RemoteError( test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) # NOTE(max_lobur): For RemoteError the client message will still have # some garbage because in RemoteError the traceback is serialized as a # list instead of '\n'.join(trace). But since RemoteError is quite rare # (it happens due to wrong deserialization settings etc.) we don't care # about this garbage. expected_msg = ("Remote error: %s %s" % (test_exc_type, self.MSG_WITHOUT_TRACE) + "\n['") actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] self.assertEqual(expected_msg, actual_msg) def _test_hook_without_traceback(self): msg = "Error message without traceback \n but \n multiline" self.root_convert_mock.side_effect = Exception(msg) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] self.assertEqual(msg, actual_msg) def test_hook_without_traceback(self): self._test_hook_without_traceback() def test_hook_without_traceback_debug(self): cfg.CONF.set_override('debug', True) self._test_hook_without_traceback() def _test_hook_on_serverfault(self): self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] return actual_msg def test_hook_on_serverfault(self): cfg.CONF.set_override('debug', False) msg = self._test_hook_on_serverfault() self.assertEqual(self.MSG_WITHOUT_TRACE, msg) def test_hook_on_serverfault_debug(self): cfg.CONF.set_override('debug', True) msg = self._test_hook_on_serverfault() self.assertEqual(self.MSG_WITH_TRACE, msg) def _test_hook_on_clientfault(self): client_error = Exception(self.MSG_WITH_TRACE) client_error.code = http_client.BAD_REQUEST self.root_convert_mock.side_effect = client_error response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] return actual_msg def test_hook_on_clientfault(self): msg = self._test_hook_on_clientfault() self.assertEqual(self.MSG_WITHOUT_TRACE, msg) def test_hook_on_clientfault_debug_tracebacks(self): cfg.CONF.set_override('debug', True) msg = self._test_hook_on_clientfault() self.assertEqual(self.MSG_WITH_TRACE, msg) class TestContextHook(base.FunctionalTest): @mock.patch.object(context, 'RequestContext') def test_context_hook_not_admin(self, mock_ctx): cfg.CONF.set_override( 'auth_type', 'password', group='watcher_clients_auth') headers = fake_headers(admin=False) reqstate = FakeRequestState(headers=headers) context_hook = hooks.ContextHook() context_hook.before(reqstate) mock_ctx.assert_called_with( auth_token=headers['X-Auth-Token'], user=headers['X-User'],
user_id=headers['X-User-Id'], domain_id=headers['X-User-Domain-Id'], domain_name=headers['X-User-Domain-Name'], project=headers['X-Project-Name'], project_id=headers['X-Project-Id'], show_deleted=None, auth_token_info=self.token_info, roles=headers['X-Roles'].split(',')) @mock.patch.object(context, 'RequestContext') def test_context_hook_admin(self, mock_ctx): cfg.CONF.set_override( 'auth_type', 'password', group='watcher_clients_auth') headers = fake_headers(admin=True) reqstate = FakeRequestState(headers=headers) context_hook = hooks.ContextHook() context_hook.before(reqstate) mock_ctx.assert_called_with( auth_token=headers['X-Auth-Token'], user=headers['X-User'], user_id=headers['X-User-Id'], domain_id=headers['X-User-Domain-Id'], domain_name=headers['X-User-Domain-Name'], project=headers['X-Project-Name'], project_id=headers['X-Project-Id'], show_deleted=None, auth_token_info=self.token_info, roles=headers['X-Roles'].split(',')) @mock.patch.object(context, 'RequestContext') def test_context_hook_public_api(self, mock_ctx): cfg.CONF.set_override( 'auth_type', 'password', group='watcher_clients_auth') headers = fake_headers(admin=True) env = {'is_public_api': True} reqstate = FakeRequestState(headers=headers, environ=env) context_hook = hooks.ContextHook() context_hook.before(reqstate) mock_ctx.assert_called_with( auth_token=headers['X-Auth-Token'], user=headers['X-User'], user_id=headers['X-User-Id'], domain_id=headers['X-User-Domain-Id'], domain_name=headers['X-User-Domain-Name'], project=headers['X-Project-Name'], project_id=headers['X-Project-Id'], show_deleted=None, auth_token_info=self.token_info, roles=headers['X-Roles'].split(',')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/test_root.py0000664000175000017500000000541600000000000022627 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from watcher.tests.api import base class TestRoot(base.FunctionalTest): def test_get_root(self): data = self.get_json('/', path_prefix='') self.assertEqual('v1', data['default_version']['id']) # Check fields are not empty [self.assertNotIn(f, ['', []]) for f in data.keys()] class TestV1Root(base.FunctionalTest): def test_get_v1_root_all(self): data = self.get_json( '/', headers={'OpenStack-API-Version': 'infra-optim 1.4'}) self.assertEqual('v1', data['id']) # Check fields are not empty for f in data.keys(): self.assertNotIn(f, ['', []]) # Check if all known resources are present and there are no extra ones. 
not_resources = ('id', 'links', 'media_types') actual_resources = tuple(set(data.keys()) - set(not_resources)) expected_resources = ('audit_templates', 'audits', 'actions', 'action_plans', 'data_model', 'scoring_engines', 'services', 'webhooks') self.assertEqual(sorted(expected_resources), sorted(actual_resources)) self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json', 'base': 'application/json'}, data['media_types']) def test_get_v1_root_without_datamodel(self): data = self.get_json( '/', headers={'OpenStack-API-Version': 'infra-optim 1.2'}) self.assertEqual('v1', data['id']) # Check fields are not empty for f in data.keys(): self.assertNotIn(f, ['', []]) # Check if all known resources are present and there are no extra ones. not_resources = ('id', 'links', 'media_types') actual_resources = tuple(set(data.keys()) - set(not_resources)) expected_resources = ('audit_templates', 'audits', 'actions', 'action_plans', 'scoring_engines', 'services') self.assertEqual(sorted(expected_resources), sorted(actual_resources)) self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json', 'base': 'application/json'}, data['media_types']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/test_scheduling.py0000664000175000017500000001202200000000000023760 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from apscheduler.schedulers import background import freezegun from oslo_utils import timeutils from watcher.api import scheduling from watcher.notifications import service from watcher import objects from watcher.tests import base from watcher.tests.db import base as db_base from watcher.tests.db import utils class TestSchedulingService(base.TestCase): @mock.patch.object(background.BackgroundScheduler, 'start') def test_start_scheduling_service(self, m_start): scheduler = scheduling.APISchedulingService() scheduler.start() m_start.assert_called_once_with(scheduler) jobs = scheduler.get_jobs() self.assertEqual(1, len(jobs)) class TestSchedulingServiceFunctions(db_base.DbTestCase): def setUp(self): super(TestSchedulingServiceFunctions, self).setUp() fake_service = utils.get_test_service( created_at=timeutils.utcnow()) self.fake_service = objects.Service(**fake_service) @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') @mock.patch.object(objects.Service, 'list') @mock.patch.object(service, 'send_service_update') def test_get_services_status_without_services_in_list( self, mock_service_update, mock_get_list, mock_service_status): scheduler = scheduling.APISchedulingService() mock_get_list.return_value = [self.fake_service] mock_service_status.return_value = 'ACTIVE' scheduler.get_services_status(mock.ANY) mock_service_status.assert_called_once_with(mock.ANY, self.fake_service.id) mock_service_update.assert_not_called() @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') @mock.patch.object(objects.Service, 'list') @mock.patch.object(service, 'send_service_update') def test_get_services_status_with_services_in_list_same_status( self, mock_service_update, mock_get_list, mock_service_status): scheduler = scheduling.APISchedulingService() mock_get_list.return_value = [self.fake_service] scheduler.services_status = {1: 'ACTIVE'} mock_service_status.return_value = 'ACTIVE' scheduler.get_services_status(mock.ANY) mock_service_status.assert_called_once_with(mock.ANY, self.fake_service.id) mock_service_update.assert_not_called() @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') @mock.patch.object(objects.Service, 'list') @mock.patch.object(service, 'send_service_update') def test_get_services_status_with_services_in_list_diff_status( self, mock_service_update, mock_get_list, mock_service_status): scheduler = scheduling.APISchedulingService() mock_get_list.return_value = [self.fake_service] scheduler.services_status = {1: 'FAILED'} mock_service_status.return_value = 'ACTIVE' scheduler.get_services_status(mock.ANY) mock_service_status.assert_called_once_with(mock.ANY, self.fake_service.id) mock_service_update.assert_called_once_with(mock.ANY, self.fake_service, state='ACTIVE') @mock.patch.object(objects.Service, 'get') def test_get_service_status_failed_service( self, mock_get): scheduler = scheduling.APISchedulingService() mock_get.return_value = self.fake_service service_status = scheduler.get_service_status(mock.ANY, self.fake_service.id) mock_get.assert_called_once_with(mock.ANY, self.fake_service.id) self.assertEqual('FAILED', service_status) @freezegun.freeze_time('2016-09-22T08:32:26.219414') @mock.patch.object(objects.Service, 'get') def test_get_service_status_failed_active( self, mock_get): scheduler = scheduling.APISchedulingService() mock_get.return_value = self.fake_service service_status = scheduler.get_service_status(mock.ANY, self.fake_service.id) mock_get.assert_called_once_with(mock.ANY, 
self.fake_service.id) self.assertEqual('ACTIVE', service_status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/test_utils.py0000664000175000017500000000371700000000000023006 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import wsme from watcher.api.controllers.v1 import utils as v1_utils from watcher.tests import base class TestApiUtilsValidScenarios(base.TestCase): scenarios = [ ("limit=None + max_limit=None", {"limit": None, "max_limit": None, "expected": None}), ("limit=None + max_limit=1", {"limit": None, "max_limit": 1, "expected": 1}), # ("limit=0 + max_limit=None", # {"limit": 0, "max_limit": None, "expected": 0}), ("limit=1 + max_limit=None", {"limit": 1, "max_limit": None, "expected": 1}), ("limit=1 + max_limit=1", {"limit": 1, "max_limit": 1, "expected": 1}), ("limit=2 + max_limit=1", {"limit": 2, "max_limit": 1, "expected": 1}), ] def test_validate_limit(self): cfg.CONF.set_override("max_limit", self.max_limit, group="api") actual_limit = v1_utils.validate_limit(self.limit) self.assertEqual(self.expected, actual_limit) class TestApiUtilsInvalidScenarios(base.TestCase): scenarios = [ ("limit=0 + max_limit=None", {"limit": 0, "max_limit": None}), ] def test_validate_limit_invalid_cases(self): cfg.CONF.set_override("max_limit", self.max_limit, group="api") self.assertRaises( wsme.exc.ClientSideError, v1_utils.validate_limit, self.limit ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/utils.py0000664000175000017500000000726400000000000021750 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utils for testing the API service. 
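For example (the keyword override below is an illustrative assumption)::

    body = audit_post_data(audit_type='ONESHOT')

builds a dict from a test audit fixture with its internal attributes
stripped, ready to be POSTed to the API under test.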
""" import datetime from oslo_serialization import jsonutils from oslo_utils import timeutils from watcher.api.controllers.v1 import action as action_ctrl from watcher.api.controllers.v1 import action_plan as action_plan_ctrl from watcher.api.controllers.v1 import audit as audit_ctrl from watcher.api.controllers.v1 import audit_template as audit_template_ctrl from watcher.tests.db import utils as db_utils ADMIN_TOKEN = '4562138218392831' MEMBER_TOKEN = '4562138218392832' class FakeMemcache(object): """Fake cache that is used for keystone tokens lookup.""" _cache = { 'tokens/%s' % ADMIN_TOKEN: { 'access': { 'token': {'id': ADMIN_TOKEN, 'expires': '2100-09-11T00:00:00'}, 'user': {'id': 'user_id1', 'name': 'user_name1', 'tenantId': '123i2910', 'tenantName': 'mytenant', 'roles': [{'name': 'admin'}] }, } }, 'tokens/%s' % MEMBER_TOKEN: { 'access': { 'token': {'id': MEMBER_TOKEN, 'expires': '2100-09-11T00:00:00'}, 'user': {'id': 'user_id2', 'name': 'user-good', 'tenantId': 'project-good', 'tenantName': 'goodies', 'roles': [{'name': 'Member'}] } } } } def __init__(self): self.set_key = None self.set_value = None self.token_expiration = None def get(self, key): dt = timeutils.utcnow() + datetime.timedelta(minutes=5) return jsonutils.dumps((self._cache.get(key), dt.isoformat())) def set(self, key, value, time=0, min_compress_len=0): self.set_value = value self.set_key = key def remove_internal(values, internal): # NOTE(yuriyz): internal attributes should not be posted, except uuid int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] return dict( (k, v) for (k, v) in values.items() if k not in int_attr ) def audit_post_data(**kw): audit = db_utils.get_test_audit(**kw) internal = audit_ctrl.AuditPatchType.internal_attrs() return remove_internal(audit, internal) def audit_template_post_data(**kw): attrs = audit_template_ctrl.AuditTemplatePostType._wsme_attributes audit_template = db_utils.get_test_audit_template() fields = [field.key for field in attrs] post_data = {k: v for k, v in audit_template.items() if k in fields} post_data.update({k: v for k, v in kw.items() if k in fields}) return post_data def action_post_data(**kw): action = db_utils.get_test_action(**kw) internal = action_ctrl.ActionPatchType.internal_attrs() return remove_internal(action, internal) def action_plan_post_data(**kw): act_plan = db_utils.get_test_action_plan(**kw) internal = action_plan_ctrl.ActionPlanPatchType.internal_attrs() return remove_internal(act_plan, internal) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/api/v1/0000775000175000017500000000000000000000000020553 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/__init__.py0000664000175000017500000000000000000000000022652 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_actions.py0000664000175000017500000005565100000000000023640 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import itertools from unittest import mock from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from wsme import types as wtypes from watcher.api.controllers.v1 import action as api_action from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.api import utils as api_utils from watcher.tests import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils def post_get_test_action(**kw): action = api_utils.action_post_data(**kw) action_plan = db_utils.get_test_action_plan() del action['action_plan_id'] action['action_plan_uuid'] = kw.get('action_plan_uuid', action_plan['uuid']) action['parents'] = None return action class TestActionObject(base.TestCase): def test_action_init(self): action_dict = api_utils.action_post_data(action_plan_id=None, parents=None) del action_dict['state'] action = api_action.Action(**action_dict) self.assertEqual(wtypes.Unset, action.state) class TestListAction(api_base.FunctionalTest): def setUp(self): super(TestListAction, self).setUp() self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan(self.context) def test_empty(self): response = self.get_json('/actions') self.assertEqual([], response['actions']) def _assert_action_fields(self, action): action_fields = ['uuid', 'state', 'action_plan_uuid', 'action_type'] for field in action_fields: self.assertIn(field, action) def test_one(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions') self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) def test_one_soft_deleted(self): action = obj_utils.create_test_action(self.context, parents=None) action.soft_delete() response = self.get_json('/actions', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) response = self.get_json('/actions') self.assertEqual([], response['actions']) def test_get_one(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions/%s' % action['uuid']) self.assertEqual(action.uuid, response['uuid']) self.assertEqual(action.action_type, response['action_type']) self.assertEqual(action.input_parameters, response['input_parameters']) self._assert_action_fields(response) def test_get_one_soft_deleted(self): action = obj_utils.create_test_action(self.context, parents=None) action.soft_delete() response = self.get_json('/actions/%s' % action['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(action.uuid, response['uuid']) self._assert_action_fields(response) response = self.get_json('/actions/%s' % action['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): 
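        # The detail listing should expose the same actions as /actions,
        # but with the full set of fields for each entry.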
action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions/detail') self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) def test_detail_soft_deleted(self): action = obj_utils.create_test_action(self.context, parents=None) action.soft_delete() response = self.get_json('/actions/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) response = self.get_json('/actions/detail') self.assertEqual([], response['actions']) def test_detail_against_single(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions/%s/detail' % action['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): action_list = [] for id_ in range(5): action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action_list.append(action.uuid) response = self.get_json('/actions') self.assertEqual(len(action_list), len(response['actions'])) uuids = [s['uuid'] for s in response['actions']] self.assertEqual(sorted(action_list), sorted(uuids)) def test_many_with_action_plan_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, id=2, uuid=utils.generate_uuid(), audit_id=1) action_list = [] for id_ in range(5): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=2, uuid=utils.generate_uuid()) action_list.append(action.uuid) response = self.get_json('/actions') self.assertEqual(len(action_list), len(response['actions'])) for action in response['actions']: self.assertEqual(action_plan.uuid, action['action_plan_uuid']) def test_filter_by_audit_uuid(self): action_plan_1 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid()) action_list = [] for id_ in range(3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_1.id, uuid=utils.generate_uuid()) action_list.append(action.uuid) audit2 = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) action_plan_2 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=audit2.id) for id_ in range(4, 5, 6): obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_2.id, uuid=utils.generate_uuid()) response = self.get_json('/actions?audit_uuid=%s' % self.audit.uuid) self.assertEqual(len(action_list), len(response['actions'])) for action in response['actions']: self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) def test_filter_by_action_plan_uuid(self): action_plan_1 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) action_list = [] for id_ in range(3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_1.id, uuid=utils.generate_uuid()) action_list.append(action.uuid) action_plan_2 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) for id_ in range(4, 5, 6): obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_2.id, uuid=utils.generate_uuid()) response = self.get_json( '/actions?action_plan_uuid=%s' % action_plan_1.uuid) self.assertEqual(len(action_list), len(response['actions'])) for action in response['actions']: self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) 
response = self.get_json( '/actions?action_plan_uuid=%s' % action_plan_2.uuid) for action in response['actions']: self.assertEqual(action_plan_2.uuid, action['action_plan_uuid']) def test_details_and_filter_by_action_plan_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) for id_ in range(1, 3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan.id, uuid=utils.generate_uuid()) response = self.get_json( '/actions/detail?action_plan_uuid=%s' % action_plan.uuid) for action in response['actions']: self.assertEqual(action_plan.uuid, action['action_plan_uuid']) def test_details_and_filter_by_audit_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) for id_ in range(1, 3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan.id, uuid=utils.generate_uuid()) response = self.get_json( '/actions/detail?audit_uuid=%s' % self.audit.uuid) for action in response['actions']: self.assertEqual(action_plan.uuid, action['action_plan_uuid']) def test_filter_by_action_plan_and_audit_uuids(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) url = '/actions?action_plan_uuid=%s&audit_uuid=%s' % ( action_plan.uuid, self.audit.uuid) response = self.get_json(url, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) def test_many_with_sort_key_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) actions_list = [] for id_ in range(1, 3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan.id, uuid=utils.generate_uuid()) actions_list.append(action) response = self.get_json('/actions?sort_key=%s' % 'uuid') names = [s['uuid'] for s in response['actions']] self.assertEqual( sorted([a.uuid for a in actions_list]), names) def test_many_with_sort_key_action_plan_uuid(self): action_plan_1 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) action_plan_2 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) action_plans_uuid_list = [] for id_, action_plan_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(action_plan_1.id, 3), itertools.repeat(action_plan_2.id, 2)]), 1): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_id, uuid=utils.generate_uuid()) action_plans_uuid_list.append(action.action_plan.uuid) for direction in ['asc', 'desc']: response = self.get_json( '/actions?sort_key={0}&sort_dir={1}' .format('action_plan_uuid', direction)) action_plan_uuids = \ [s['action_plan_uuid'] for s in response['actions']] self.assertEqual( sorted(action_plans_uuid_list, reverse=(direction == 'desc')), action_plan_uuids, message='Failed on %s direction' % direction) def test_sort_key_validation(self): response = self.get_json( '/actions?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) def test_many_with_soft_deleted_action_plan_uuid(self): action_plan1 = obj_utils.create_test_action_plan( self.context, id=2, uuid=utils.generate_uuid(), audit_id=1) action_plan2 = obj_utils.create_test_action_plan( self.context, id=3, uuid=utils.generate_uuid(), audit_id=1) ap1_action_list = [] ap2_action_list = [] for id_ 
in range(0, 2): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan1.id, uuid=utils.generate_uuid()) ap1_action_list.append(action) for id_ in range(2, 4): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan2.id, uuid=utils.generate_uuid()) ap2_action_list.append(action) action_plan1.state = objects.action_plan.State.CANCELLED action_plan1.save() self.delete('/action_plans/%s' % action_plan1.uuid) response = self.get_json('/actions') # We deleted the actions from the 1st action plan so we've got 2 left self.assertEqual(len(ap2_action_list), len(response['actions'])) # We deleted them so that's normal self.assertEqual([], [act for act in response['actions'] if act['action_plan_uuid'] == action_plan1.uuid]) # Here are the 2 actions left self.assertEqual( set([act.as_dict()['uuid'] for act in ap2_action_list]), set([act['uuid'] for act in response['actions'] if act['action_plan_uuid'] == action_plan2.uuid])) def test_many_with_parents(self): action_list = [] for id_ in range(5): if id_ > 0: action = obj_utils.create_test_action( self.context, id=id_, uuid=utils.generate_uuid(), parents=[action_list[id_ - 1]]) else: action = obj_utils.create_test_action( self.context, id=id_, uuid=utils.generate_uuid(), parents=[]) action_list.append(action.uuid) response = self.get_json('/actions') response_actions = response['actions'] for id_ in range(4): self.assertEqual(response_actions[id_]['uuid'], response_actions[id_ + 1]['parents'][0]) def test_many_without_soft_deleted(self): action_list = [] for id_ in [1, 2, 3]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action_list.append(action.uuid) for id_ in [4, 5]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action.soft_delete() response = self.get_json('/actions') self.assertEqual(3, len(response['actions'])) uuids = [s['uuid'] for s in response['actions']] self.assertEqual(sorted(action_list), sorted(uuids)) def test_many_with_soft_deleted(self): action_list = [] for id_ in [1, 2, 3]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action_list.append(action.uuid) for id_ in [4, 5]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action.soft_delete() action_list.append(action.uuid) response = self.get_json('/actions', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['actions'])) uuids = [s['uuid'] for s in response['actions']] self.assertEqual(sorted(action_list), sorted(uuids)) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_action(self.context, id=1, uuid=uuid) response = self.get_json('/actions/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue(self.validate_link( link['href'], bookmark=bookmark)) def test_collection_links(self): parents = None for id_ in range(5): action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid(), parents=parents) parents = [action.uuid] response = self.get_json('/actions/?limit=3') self.assertEqual(3, len(response['actions'])) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) response = 
self.get_json('/actions') self.assertEqual(3, len(response['actions'])) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) obj_utils.create_test_action_plan(self.context) self.action = obj_utils.create_test_action(self.context, parents=None) p = mock.patch.object(db_api.BaseConnection, 'update_action') self.mock_action_update = p.start() self.mock_action_update.side_effect = self._simulate_rpc_action_update self.addCleanup(p.stop) def _simulate_rpc_action_update(self, action): action.save() return action @mock.patch('oslo_utils.timeutils.utcnow') def test_patch_not_allowed(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.audit.State.SUCCEEDED response = self.get_json('/actions/%s' % self.action.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/actions/%s' % self.action.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertTrue(response.json['error_message']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan(self.context) self.action = obj_utils.create_test_action(self.context, parents=None) p = mock.patch.object(db_api.BaseConnection, 'update_action') self.mock_action_update = p.start() self.mock_action_update.side_effect = self._simulate_rpc_action_update self.addCleanup(p.stop) def _simulate_rpc_action_update(self, action): action.save() return action @mock.patch('oslo_utils.timeutils.utcnow') def test_delete_action_not_allowed(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.delete('/actions/%s' % self.action.uuid, expect_errors=True) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestActionPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestActionPolicyEnforcement, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) obj_utils.create_test_action_plan(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertEqual( "Policy doesn't allow %s to be performed."
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "action:get_all", self.get_json, '/actions', expect_errors=True) def test_policy_disallow_get_one(self): action = obj_utils.create_test_action(self.context) self._common_policy_check( "action:get", self.get_json, '/actions/%s' % action.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "action:detail", self.get_json, '/actions/detail', expect_errors=True) class TestActionPolicyEnforcementWithAdminContext(TestListAction, api_base.AdminRoleTest): def setUp(self): super(TestActionPolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "action:detail": "rule:default", "action:get": "rule:default", "action:get_all": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_actions_plans.py0000664000175000017500000007235000000000000025030 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import itertools from unittest import mock from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from watcher.applier import rpcapi as aapi from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListActionPlan(api_base.FunctionalTest): def setUp(self): super(TestListActionPlan, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) def test_empty(self): response = self.get_json('/action_plans') self.assertEqual([], response['action_plans']) def _assert_action_plans_fields(self, action_plan): action_plan_fields = [ 'uuid', 'audit_uuid', 'strategy_uuid', 'strategy_name', 'state', 'global_efficacy', 'efficacy_indicators'] for field in action_plan_fields: self.assertIn(field, action_plan) def test_one(self): action_plan = obj_utils.create_test_action_plan(self.context) response = self.get_json('/action_plans') self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) def test_one_soft_deleted(self): action_plan = obj_utils.create_test_action_plan(self.context) action_plan.soft_delete() response = self.get_json('/action_plans', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) response = self.get_json('/action_plans') self.assertEqual([], response['action_plans']) def test_get_one_ok(self): action_plan = obj_utils.create_test_action_plan(self.context) obj_utils.create_test_efficacy_indicator( self.context, 
action_plan_id=action_plan['id']) response = self.get_json('/action_plans/%s' % action_plan['uuid']) self.assertEqual(action_plan.uuid, response['uuid']) self._assert_action_plans_fields(response) self.assertEqual( [{'description': 'Test indicator', 'name': 'test_indicator', 'value': 0.0, 'unit': '%'}], response['efficacy_indicators']) def test_get_one_soft_deleted(self): action_plan = obj_utils.create_test_action_plan(self.context) action_plan.soft_delete() response = self.get_json('/action_plans/%s' % action_plan['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(action_plan.uuid, response['uuid']) self._assert_action_plans_fields(response) response = self.get_json('/action_plans/%s' % action_plan['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): action_plan = obj_utils.create_test_action_plan(self.context) response = self.get_json('/action_plans/detail') self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) def test_detail_soft_deleted(self): action_plan = obj_utils.create_test_action_plan(self.context) action_plan.soft_delete() response = self.get_json('/action_plans/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) response = self.get_json('/action_plans/detail') self.assertEqual([], response['action_plans']) def test_detail_against_single(self): action_plan = obj_utils.create_test_action_plan(self.context) response = self.get_json( '/action_plan/%s/detail' % action_plan['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): action_plan_list = [] for id_ in range(5): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan_list.append(action_plan.uuid) response = self.get_json('/action_plans') self.assertEqual(len(action_plan_list), len(response['action_plans'])) uuids = [s['uuid'] for s in response['action_plans']] self.assertEqual(sorted(action_plan_list), sorted(uuids)) def test_many_with_soft_deleted_audit_uuid(self): action_plan_list = [] audit1 = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) audit2 = obj_utils.create_test_audit( self.context, id=3, uuid=utils.generate_uuid(), name='My Audit {0}'.format(3)) for id_ in range(0, 2): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit1.id) action_plan_list.append(action_plan.uuid) for id_ in range(2, 4): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit2.id) action_plan_list.append(action_plan.uuid) new_state = objects.audit.State.CANCELLED self.patch_json( '/audits/%s' % audit1.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.delete('/audits/%s' % audit1.uuid) response = self.get_json('/action_plans') self.assertEqual(len(action_plan_list), len(response['action_plans'])) for id_ in range(0, 2): action_plan = response['action_plans'][id_] self.assertIsNone(action_plan['audit_uuid']) for id_ in range(2, 4): action_plan = response['action_plans'][id_] self.assertEqual(audit2.uuid, action_plan['audit_uuid']) def test_many_with_audit_uuid(self): action_plan_list = [] audit = obj_utils.create_test_audit( self.context, id=2, 
uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) for id_ in range(2, 5): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit.id) action_plan_list.append(action_plan.uuid) response = self.get_json('/action_plans') self.assertEqual(len(action_plan_list), len(response['action_plans'])) for action in response['action_plans']: self.assertEqual(audit.uuid, action['audit_uuid']) def test_many_with_audit_uuid_filter(self): action_plan_list1 = [] audit1 = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) for id_ in range(2, 5): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit1.id) action_plan_list1.append(action_plan.uuid) audit2 = obj_utils.create_test_audit( self.context, id=3, uuid=utils.generate_uuid(), name='My Audit {0}'.format(3)) action_plan_list2 = [] for id_ in [5, 6, 7]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit2.id) action_plan_list2.append(action_plan.uuid) response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid) self.assertEqual(len(action_plan_list2), len(response['action_plans'])) for action in response['action_plans']: self.assertEqual(audit2.uuid, action['audit_uuid']) def test_many_without_soft_deleted(self): action_plan_list = [] for id_ in [1, 2, 3]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan_list.append(action_plan.uuid) for id_ in [4, 5]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan.soft_delete() response = self.get_json('/action_plans') self.assertEqual(3, len(response['action_plans'])) uuids = [s['uuid'] for s in response['action_plans']] self.assertEqual(sorted(action_plan_list), sorted(uuids)) def test_many_with_soft_deleted(self): action_plan_list = [] for id_ in [1, 2, 3]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan_list.append(action_plan.uuid) for id_ in [4, 5]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan.soft_delete() action_plan_list.append(action_plan.uuid) response = self.get_json('/action_plans', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['action_plans'])) uuids = [s['uuid'] for s in response['action_plans']] self.assertEqual(sorted(action_plan_list), sorted(uuids)) def test_many_with_sort_key_audit_uuid(self): audit_list = [] for id_ in range(2, 5): audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit.id) audit_list.append(audit.uuid) response = self.get_json('/action_plans/?sort_key=audit_uuid') self.assertEqual(3, len(response['action_plans'])) uuids = [s['audit_uuid'] for s in response['action_plans']] self.assertEqual(sorted(audit_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/action_plans?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_action_plan(self.context, id=1, uuid=uuid) response = self.get_json('/action_plans/%s' % uuid) self.assertIn('links', response.keys()) 
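# NOTE: each 'links' entry is expected to pair a versioned 'self' link
# with an unversioned 'bookmark' link, e.g. (values illustrative):
#
#   {'href': 'http://<host>:<port>/v1/action_plans/<uuid>', 'rel': 'self'}
#   {'href': 'http://<host>:<port>/action_plans/<uuid>', 'rel': 'bookmark'}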
self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue(self.validate_link( link['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/action_plans/?limit=3') self.assertEqual(3, len(response['action_plans'])) next_marker = response['action_plans'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/action_plans') self.assertEqual(3, len(response['action_plans'])) next_marker = response['action_plans'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan( self.context) p = mock.patch.object(db_api.BaseConnection, 'destroy_action_plan') self.mock_action_plan_delete = p.start() self.mock_action_plan_delete.side_effect = \ self._simulate_rpc_action_plan_delete self.addCleanup(p.stop) def _simulate_rpc_action_plan_delete(self, audit_uuid): action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid) action_plan.destroy() def test_delete_action_plan_without_action(self): response = self.delete('/action_plans/%s' % self.action_plan.uuid, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.action_plan.state = objects.action_plan.State.SUCCEEDED self.action_plan.save() self.delete('/action_plans/%s' % self.action_plan.uuid) response = self.get_json('/action_plans/%s' % self.action_plan.uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_delete_action_plan_with_action(self): action = obj_utils.create_test_action( self.context, id=1) self.action_plan.state = objects.action_plan.State.SUCCEEDED self.action_plan.save() self.delete('/action_plans/%s' % self.action_plan.uuid) ap_response = self.get_json('/action_plans/%s' % self.action_plan.uuid, expect_errors=True) acts_response = self.get_json( '/actions/?action_plan_uuid=%s' % self.action_plan.uuid) act_response = self.get_json( '/actions/%s' % action.uuid, expect_errors=True) # The action plan does not exist anymore self.assertEqual(HTTPStatus.NOT_FOUND, ap_response.status_int) self.assertEqual('application/json', ap_response.content_type) self.assertTrue(ap_response.json['error_message']) # Nor does the action self.assertEqual(0, len(acts_response['actions'])) self.assertEqual(HTTPStatus.NOT_FOUND, act_response.status_int) self.assertEqual('application/json', act_response.content_type) self.assertTrue(act_response.json['error_message']) def test_delete_action_plan_not_found(self): uuid = utils.generate_uuid() response = self.delete('/action_plans/%s' % uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) 
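# NOTE: error responses carry a JSON 'error_message' whose faultstring is
# itself JSON-encoded, roughly of this shape (illustrative):
#   {'error_message': '{"faultstring": "...", "faultcode": "Client"}'}
# which is why the policy tests below decode it with jsonutils.loads().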
self.assertTrue(response.json['error_message']) class TestStart(api_base.FunctionalTest): def setUp(self): super(TestStart, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan( self.context, state=objects.action_plan.State.RECOMMENDED) p = mock.patch.object(db_api.BaseConnection, 'update_action_plan') self.mock_action_plan_update = p.start() self.mock_action_plan_update.side_effect = \ self._simulate_rpc_action_plan_update self.addCleanup(p.stop) def _simulate_rpc_action_plan_update(self, action_plan): action_plan.save() return action_plan @mock.patch('watcher.common.policy.enforce') def test_start_action_plan_not_found(self, mock_policy): mock_policy.return_value = True uuid = utils.generate_uuid() response = self.post('/v1/action_plans/%s/%s' % (uuid, 'start'), expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @mock.patch('watcher.common.policy.enforce') def test_start_action_plan(self, mock_policy): mock_policy.return_value = True action = obj_utils.create_test_action( self.context, id=1) self.action_plan.state = objects.action_plan.State.SUCCEEDED response = self.post('/v1/action_plans/%s/%s/' % (self.action_plan.uuid, 'start'), expect_errors=True) self.assertEqual(HTTPStatus.OK, response.status_int) act_response = self.get_json( '/actions/%s' % action.uuid, expect_errors=True) self.assertEqual(HTTPStatus.OK, act_response.status_int) self.assertEqual('PENDING', act_response.json['state']) self.assertEqual('application/json', act_response.content_type) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan( self.context, state=objects.action_plan.State.RECOMMENDED) p = mock.patch.object(db_api.BaseConnection, 'update_action_plan') self.mock_action_plan_update = p.start() self.mock_action_plan_update.side_effect = \ self._simulate_rpc_action_plan_update self.addCleanup(p.stop) def _simulate_rpc_action_plan_update(self, action_plan): action_plan.save() return action_plan @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_denied(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.action_plan.State.DELETED response = self.get_json( '/action_plans/%s' % self.action_plan.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_replace_non_existent_action_plan_denied(self): response = self.patch_json( '/action_plans/%s' % utils.generate_uuid(), [{'path': '/state', 'value': objects.action_plan.State.PENDING, 'op': 'replace'}], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_add_non_existent_property_denied(self): response = self.patch_json( 
'/action_plans/%s' % self.action_plan.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_denied(self): # We should not be able to remove the state of an action plan response = self.get_json( '/action_plans/%s' % self.action_plan.uuid) self.assertIsNotNone(response['state']) response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/state', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_uuid_denied(self): response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property_denied(self): response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan') def test_replace_state_pending_ok(self, applier_mock): new_state = objects.action_plan.State.PENDING response = self.get_json( '/action_plans/%s' % self.action_plan.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) applier_mock.assert_called_once_with(mock.ANY, self.action_plan.uuid) ALLOWED_TRANSITIONS = [ {"original_state": objects.action_plan.State.RECOMMENDED, "new_state": objects.action_plan.State.PENDING}, {"original_state": objects.action_plan.State.RECOMMENDED, "new_state": objects.action_plan.State.CANCELLED}, {"original_state": objects.action_plan.State.ONGOING, "new_state": objects.action_plan.State.CANCELLING}, {"original_state": objects.action_plan.State.PENDING, "new_state": objects.action_plan.State.CANCELLED}, ] class TestPatchStateTransitionDenied(api_base.FunctionalTest): STATES = [ ap_state for ap_state in objects.action_plan.State.__dict__ if not ap_state.startswith("_") ] scenarios = [ ( "%s -> %s" % (original_state, new_state), {"original_state": original_state, "new_state": new_state}, ) for original_state, new_state in list(itertools.product(STATES, STATES)) # from DELETED to ... # NOTE: Any state transition from DELETED (To RECOMMENDED, PENDING, # ONGOING, CANCELLED, SUCCEEDED and FAILED) will cause a 404 Not Found # because we cannot retrieve them with a GET (soft_deleted state). 
# This is the reason why they are not listed here but they have a # special test to cover it if original_state != objects.action_plan.State.DELETED and original_state != new_state and {"original_state": original_state, "new_state": new_state} not in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionDenied, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) @mock.patch.object( db_api.BaseConnection, 'update_action_plan', mock.Mock(side_effect=lambda ap: ap.save() or ap)) def test_replace_state_pending_denied(self): action_plan = obj_utils.create_test_action_plan( self.context, state=self.original_state) initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) response = self.patch_json( '/action_plans/%s' % action_plan.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}], expect_errors=True) updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) self.assertNotEqual(self.new_state, initial_ap['state']) self.assertEqual(self.original_state, updated_ap['state']) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestPatchStateTransitionOk(api_base.FunctionalTest): scenarios = [ ( "%s -> %s" % (transition["original_state"], transition["new_state"]), transition ) for transition in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionOk, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) @mock.patch.object( db_api.BaseConnection, 'update_action_plan', mock.Mock(side_effect=lambda ap: ap.save() or ap)) @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan', mock.Mock()) def test_replace_state_pending_ok(self): action_plan = obj_utils.create_test_action_plan( self.context, state=self.original_state) initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) response = self.patch_json( '/action_plans/%s' % action_plan.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}]) updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) self.assertNotEqual(self.new_state, initial_ap['state']) self.assertEqual(self.new_state, updated_ap['state']) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) class TestActionPlanPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestActionPlanPolicyEnforcement, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertEqual( "Policy doesn't allow %s to be performed."
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "action_plan:get_all", self.get_json, '/action_plans', expect_errors=True) def test_policy_disallow_get_one(self): action_plan = obj_utils.create_test_action_plan(self.context) self._common_policy_check( "action_plan:get", self.get_json, '/action_plans/%s' % action_plan.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "action_plan:detail", self.get_json, '/action_plans/detail', expect_errors=True) def test_policy_disallow_update(self): action_plan = obj_utils.create_test_action_plan(self.context) self._common_policy_check( "action_plan:update", self.patch_json, '/action_plans/%s' % action_plan.uuid, [{'path': '/state', 'value': objects.action_plan.State.DELETED, 'op': 'replace'}], expect_errors=True) def test_policy_disallow_delete(self): action_plan = obj_utils.create_test_action_plan(self.context) self._common_policy_check( "action_plan:delete", self.delete, '/action_plans/%s' % action_plan.uuid, expect_errors=True) class TestActionPlanPolicyEnforcementWithAdminContext(TestListActionPlan, api_base.AdminRoleTest): def setUp(self): super(TestActionPlanPolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "action_plan:delete": "rule:default", "action_plan:detail": "rule:default", "action_plan:get": "rule:default", "action_plan:get_all": "rule:default", "action_plan:update": "rule:default", "action_plan:start": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_audit_templates.py0000664000175000017500000011273400000000000025360 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
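# NOTE: the tests below exercise the audit-template CRUD API; a compact
# request/response sketch of the happy paths they assert (illustrative):
#
#   POST   /v1/audit_templates {'name': ..., 'goal': <goal-uuid>}
#          -> 201 Created, body contains a generated 'uuid'
#   PATCH  /v1/audit_templates/<uuid>
#          [{'op': 'replace', 'path': '/goal', 'value': <goal-uuid>}]
#          -> 200 OK
#   DELETE /v1/audit_templates/<uuid> -> soft-deletes the row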
import datetime import itertools from unittest import mock from urllib import parse as urlparse from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils import webtest.app as webtest_app from wsme import types as wtypes from watcher.api.controllers.v1 import audit_template as api_audit_template from watcher.common import exception from watcher.common import utils from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.api import utils as api_utils from watcher.tests import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils def post_get_test_audit_template(**kw): goal = db_utils.get_test_goal() strategy = db_utils.get_test_strategy(goal_id=goal['id']) kw['goal'] = kw.get('goal', goal['uuid']) kw['strategy'] = kw.get('strategy', strategy['uuid']) kw['scope'] = kw.get('scope', []) audit_template = api_utils.audit_template_post_data(**kw) return audit_template class TestAuditTemplateObject(base.TestCase): def test_audit_template_init(self): audit_template_dict = post_get_test_audit_template() del audit_template_dict['name'] audit_template = api_audit_template.AuditTemplate( **audit_template_dict) self.assertEqual(wtypes.Unset, audit_template.name) class FunctionalTestWithSetup(api_base.FunctionalTest): def setUp(self): super(FunctionalTestWithSetup, self).setUp() self.fake_goal1 = obj_utils.create_test_goal( self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") self.fake_goal2 = obj_utils.create_test_goal( self.context, id=2, uuid=utils.generate_uuid(), name="dummy_2") self.fake_strategy1 = obj_utils.create_test_strategy( self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", goal_id=self.fake_goal1.id) self.fake_strategy2 = obj_utils.create_test_strategy( self.context, id=2, uuid=utils.generate_uuid(), name="strategy_2", goal_id=self.fake_goal2.id) class TestListAuditTemplate(FunctionalTestWithSetup): def test_empty(self): response = self.get_json('/audit_templates') self.assertEqual([], response['audit_templates']) def _assert_audit_template_fields(self, audit_template): audit_template_fields = ['name', 'goal_uuid', 'goal_name', 'strategy_uuid', 'strategy_name'] for field in audit_template_fields: self.assertIn(field, audit_template) def test_one(self): audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.fake_strategy1.id) response = self.get_json('/audit_templates') self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) def test_get_one_soft_deleted_ok(self): audit_template = obj_utils.create_test_audit_template(self.context) audit_template.soft_delete() response = self.get_json('/audit_templates', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) response = self.get_json('/audit_templates') self.assertEqual([], response['audit_templates']) def test_get_one_by_uuid(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json( '/audit_templates/%s' % audit_template['uuid']) self.assertEqual(audit_template.uuid, response['uuid']) self._assert_audit_template_fields(response) def test_get_one_by_name(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json(urlparse.quote( 
'/audit_templates/%s' % audit_template['name'])) self.assertEqual(audit_template.uuid, response['uuid']) self._assert_audit_template_fields(response) def test_get_one_soft_deleted(self): audit_template = obj_utils.create_test_audit_template(self.context) audit_template.soft_delete() response = self.get_json( '/audit_templates/%s' % audit_template['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit_template.uuid, response['uuid']) self._assert_audit_template_fields(response) response = self.get_json( '/audit_templates/%s' % audit_template['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json('/audit_templates/detail') self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) def test_detail_soft_deleted(self): audit_template = obj_utils.create_test_audit_template(self.context) audit_template.soft_delete() response = self.get_json('/audit_templates/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) response = self.get_json('/audit_templates/detail') self.assertEqual([], response['audit_templates']) def test_detail_against_single(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json( '/audit_templates/%s/detail' % audit_template['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) response = self.get_json('/audit_templates') self.assertEqual(len(audit_template_list), len(response['audit_templates'])) uuids = [s['uuid'] for s in response['audit_templates']] self.assertEqual( sorted([at.uuid for at in audit_template_list]), sorted(uuids)) def test_many_without_soft_deleted(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) # We soft delete the ones with ID 4 and 5 [at.soft_delete() for at in audit_template_list[3:]] response = self.get_json('/audit_templates') self.assertEqual(3, len(response['audit_templates'])) uuids = [s['uuid'] for s in response['audit_templates']] self.assertEqual( sorted([at.uuid for at in audit_template_list[:3]]), sorted(uuids)) def test_many_with_soft_deleted(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) # We soft delete the ones with ID 4 and 5 [at.soft_delete() for at in audit_template_list[3:]] response = self.get_json('/audit_templates', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['audit_templates'])) uuids = [s['uuid'] for s in response['audit_templates']] self.assertEqual( sorted([at.uuid for at in audit_template_list]), sorted(uuids)) def test_links(self): uuid = utils.generate_uuid() 
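# NOTE: the soft-delete tests above rely on the listing endpoints hiding
# soft-deleted rows by default; the assumed visibility rule is:
#   GET /audit_templates                         -> live rows only
#   GET /audit_templates + X-Show-Deleted: True  -> live + soft-deleted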
obj_utils.create_test_audit_template(self.context, id=1, uuid=uuid) response = self.get_json('/audit_templates/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue( self.validate_link(link['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) response = self.get_json('/audit_templates/?limit=3') self.assertEqual(3, len(response['audit_templates'])) next_marker = response['audit_templates'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) response = self.get_json('/audit_templates') self.assertEqual(3, len(response['audit_templates'])) next_marker = response['audit_templates'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_filter_by_goal_uuid(self): for id_, goal_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_goal1.id, 3), itertools.repeat(self.fake_goal2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), goal_id=goal_id) response = self.get_json( '/audit_templates?goal=%s' % self.fake_goal2.uuid) self.assertEqual(2, len(response['audit_templates'])) def test_filter_by_goal_name(self): for id_, goal_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_goal1.id, 3), itertools.repeat(self.fake_goal2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), goal_id=goal_id) response = self.get_json( '/audit_templates?goal=%s' % self.fake_goal2.name) self.assertEqual(2, len(response['audit_templates'])) def test_filter_by_strategy_uuid(self): for id_, strategy_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_strategy1.id, 3), itertools.repeat(self.fake_strategy2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), strategy_id=strategy_id) response = self.get_json( '/audit_templates?strategy=%s' % self.fake_strategy2.uuid) self.assertEqual(2, len(response['audit_templates'])) def test_filter_by_strategy_name(self): for id_, strategy_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_strategy1.id, 3), itertools.repeat(self.fake_strategy2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), strategy_id=strategy_id) response = self.get_json( '/audit_templates?strategy=%s' % self.fake_strategy2.name) self.assertEqual(2, len(response['audit_templates'])) def test_many_with_sort_key_name(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) response = self.get_json('/audit_templates?sort_key=%s' % 'name') names = [s['name'] for s in response['audit_templates']] 
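# NOTE: the sorting contract checked here is assumed to be a plain
# ORDER BY on a whitelisted key, e.g.:
#   GET /audit_templates?sort_key=name&sort_dir=asc
# unknown keys are rejected with 400 (see test_sort_key_validation).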
self.assertEqual( sorted([at.name for at in audit_template_list]), names) def test_many_with_sort_key_goal_name(self): goal_names_list = [] for id_, goal_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_goal1.id, 3), itertools.repeat(self.fake_goal2.id, 2)]), 1): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), goal_id=goal_id) goal_names_list.append(audit_template.goal.name) for direction in ['asc', 'desc']: response = self.get_json( '/audit_templates?sort_key={0}&sort_dir={1}' .format('goal_name', direction)) goal_names = [s['goal_name'] for s in response['audit_templates']] self.assertEqual( sorted(goal_names_list, reverse=(direction == 'desc')), goal_names) def test_sort_key_validation(self): response = self.get_json( '/audit_templates?sort_key=%s' % 'goal_bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) class TestPatch(FunctionalTestWithSetup): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) self.audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=None) @mock.patch.object(timeutils, 'utcnow') def test_replace_goal_uuid(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_goal_uuid = self.fake_goal2.uuid response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertNotEqual(new_goal_uuid, response['goal_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'value': new_goal_uuid, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual(new_goal_uuid, response['goal_uuid']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) @mock.patch.object(timeutils, 'utcnow') def test_replace_goal_uuid_by_name(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_goal_uuid = self.fake_goal2.uuid response = self.get_json(urlparse.quote( '/audit_templates/%s' % self.audit_template.name)) self.assertNotEqual(new_goal_uuid, response['goal_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.name, [{'path': '/goal', 'value': new_goal_uuid, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) response = self.get_json( '/audit_templates/%s' % self.audit_template.name) self.assertEqual(new_goal_uuid, response['goal_uuid']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) def test_replace_non_existent_audit_template(self): response = self.patch_json( '/audit_templates/%s' % utils.generate_uuid(), [{'path': '/goal', 'value': self.fake_goal1.uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_replace_invalid_goal(self): with mock.patch.object( self.dbapi, 'update_audit_template', wraps=self.dbapi.update_audit_template ) as cn_mock: response = self.patch_json( '/audit_templates/%s' % 
self.audit_template.uuid, [{'path': '/goal', 'value': utils.generate_uuid(), 'op': 'replace'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) assert not cn_mock.called def test_add_goal_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'value': self.fake_goal2.uuid, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_int) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual(self.fake_goal2.uuid, response['goal_uuid']) def test_add_strategy_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'value': self.fake_strategy1.uuid, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_int) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual(self.fake_strategy1.uuid, response['strategy_uuid']) def test_replace_strategy_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'value': self.fake_strategy2['uuid'], 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_int) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual( self.fake_strategy2['uuid'], response['strategy_uuid']) def test_replace_invalid_strategy(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'value': utils.generate_uuid(), # Does not exist 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_add_non_existent_property(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_strategy(self): audit_template = obj_utils.create_test_audit_template( self.context, uuid=utils.generate_uuid(), name="AT_%s" % utils.generate_uuid(), goal_id=self.fake_goal1.id, strategy_id=self.fake_strategy1.id) response = self.get_json( '/audit_templates/%s' % audit_template.uuid) self.assertIsNotNone(response['strategy_uuid']) response = self.patch_json( '/audit_templates/%s' % audit_template.uuid, [{'path': '/strategy', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) def test_remove_goal(self): response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertIsNotNone(response['goal_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
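# NOTE: a summary of the RFC 6902 patch operations the cases above and
# below expect (status codes as asserted in this class):
#   replace /goal or /strategy -> 200 when the new target exists
#   remove  /strategy          -> 200 (the strategy is optional)
#   remove  /goal              -> 403 (a goal is mandatory)
#   remove  /uuid              -> 400 (immutable field)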
self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestPost(FunctionalTestWithSetup): @mock.patch.object(timeutils, 'utcnow') def test_create_audit_template(self, mock_utcnow): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid) test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) # Check location header self.assertIsNotNone(response.location) expected_location = \ '/v1/audit_templates/%s' % response.json['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) self.assertNotIn('updated_at', response.json.keys()) self.assertNotIn('deleted_at', response.json.keys()) self.assertEqual(self.fake_goal1.uuid, response.json['goal_uuid']) self.assertEqual(self.fake_strategy1.uuid, response.json['strategy_uuid']) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) @mock.patch.object(timeutils, 'utcnow') def test_create_audit_template_with_strategy_name(self, mock_utcnow): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.name) test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) # Check location header self.assertIsNotNone(response.location) expected_location = \ '/v1/audit_templates/%s' % response.json['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) self.assertNotIn('updated_at', response.json.keys()) self.assertNotIn('deleted_at', response.json.keys()) self.assertEqual(self.fake_goal1.uuid, response.json['goal_uuid']) self.assertEqual(self.fake_strategy1.uuid, response.json['strategy_uuid']) self.assertEqual(self.fake_strategy1.name, response.json['strategy_name']) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) def test_create_audit_template_validation_with_aggregates(self): scope = [{'compute': [{'host_aggregates': [{'id': '*'}]}, {'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ2'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [ {'name': 'Node_1'}, {'name': 'Node_2'}]}, {'host_aggregates': [{'id': '*'}]} ]} ] } ] audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid, scope=scope) with self.assertRaisesRegex(webtest_app.AppError, "be included and excluded together"): self.post_json('/audit_templates', audit_template_dict) scope = [{'host_aggregates': [{'id1':
'*'}]}] audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid, scope=scope) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, response.status_int) def test_create_audit_template_does_autogenerate_id(self): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=None) with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual(audit_template_dict['goal'], response.json['goal_uuid']) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cn_mock.call_args[0][0]) def test_create_audit_template_generate_uuid(self): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=None) response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) def test_create_audit_template_with_invalid_goal(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template( goal_uuid=utils.generate_uuid()) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) assert not cn_mock.called def test_create_audit_template_with_invalid_strategy(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template( goal_uuid=self.fake_goal1['uuid'], strategy_uuid=utils.generate_uuid()) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) assert not cn_mock.called def test_create_audit_template_with_unrelated_strategy(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template( goal_uuid=self.fake_goal1['uuid'], strategy=self.fake_strategy2['uuid']) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) assert not cn_mock.called def test_create_audit_template_with_uuid(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template() response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) assert not cn_mock.called def test_create_audit_template_with_old_scope(self): scope = [{'host_aggregates': [{'id': '*'}]}, {'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ2'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [ {'name': 'Node_1'}, {'name': 'Node_2'}]}, ]} ] audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid, scope=scope) response = self.post_json('/audit_templates', audit_template_dict) 
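# NOTE: two scope layouts are accepted above; a minimal example of each
# (values illustrative):
#   legacy flat form: [{'host_aggregates': [{'id': '*'}]}]
#   namespaced form:  [{'compute': [{'host_aggregates': [{'id': '*'}]}]}]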
self.assertEqual(HTTPStatus.CREATED, response.status_int) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_goal(self.context) self.audit_template = obj_utils.create_test_audit_template( self.context) @mock.patch.object(timeutils, 'utcnow') def test_delete_audit_template_by_uuid(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time self.delete(urlparse.quote('/audit_templates/%s' % self.audit_template.uuid)) response = self.get_json( urlparse.quote('/audit_templates/%s' % self.audit_template.uuid), expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertRaises(exception.AuditTemplateNotFound, objects.AuditTemplate.get_by_uuid, self.context, self.audit_template.uuid) self.context.show_deleted = True at = objects.AuditTemplate.get_by_uuid(self.context, self.audit_template.uuid) self.assertEqual(self.audit_template.name, at.name) @mock.patch.object(timeutils, 'utcnow') def test_delete_audit_template_by_name(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time self.delete(urlparse.quote('/audit_templates/%s' % self.audit_template.name)) response = self.get_json( urlparse.quote('/audit_templates/%s' % self.audit_template.name), expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertRaises(exception.AuditTemplateNotFound, objects.AuditTemplate.get_by_name, self.context, self.audit_template.name) self.context.show_deleted = True at = objects.AuditTemplate.get_by_name(self.context, self.audit_template.name) self.assertEqual(self.audit_template.uuid, at.uuid) def test_delete_audit_template_not_found(self): uuid = utils.generate_uuid() response = self.delete( '/audit_templates/%s' % uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestAuditTemplatePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "audit_template:get_all", self.get_json, '/audit_templates', expect_errors=True) def test_policy_disallow_get_one(self): obj_utils.create_test_goal(self.context) audit_template = obj_utils.create_test_audit_template(self.context) self._common_policy_check( "audit_template:get", self.get_json, '/audit_templates/%s' % audit_template.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "audit_template:detail", self.get_json, '/audit_templates/detail', expect_errors=True) def test_policy_disallow_update(self): obj_utils.create_test_goal(self.context) audit_template = obj_utils.create_test_audit_template(self.context) self._common_policy_check( "audit_template:update", self.patch_json, '/audit_templates/%s' % audit_template.uuid, [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): fake_goal1 = obj_utils.get_test_goal( self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") fake_goal1.create() fake_strategy1 = obj_utils.get_test_strategy( self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", goal_id=fake_goal1.id) fake_strategy1.create() audit_template_dict = post_get_test_audit_template( goal=fake_goal1.uuid, strategy=fake_strategy1.uuid) self._common_policy_check( "audit_template:create", self.post_json, '/audit_templates', audit_template_dict, expect_errors=True) def test_policy_disallow_delete(self): obj_utils.create_test_goal(self.context) audit_template = obj_utils.create_test_audit_template(self.context) self._common_policy_check( "audit_template:delete", self.delete, '/audit_templates/%s' % audit_template.uuid, expect_errors=True) class TestAuditTemplatePolicyWithAdminContext(TestListAuditTemplate, api_base.AdminRoleTest): def setUp(self): super(TestAuditTemplatePolicyWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "audit_template:create": "rule:default", "audit_template:delete": "rule:default", "audit_template:detail": "rule:default", "audit_template:get": "rule:default", "audit_template:get_all": "rule:default", "audit_template:update": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_audits.py0000664000175000017500000014067300000000000023470 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
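# NOTE: the helpers below build audit POST bodies from the shared fixture
# data. A minimal request body, assuming the default fixtures, looks
# roughly like this (illustrative sketch, not an exhaustive schema):
#
#   {
#       "audit_type": "ONESHOT",
#       "audit_template_uuid": "<template uuid>",
#       "goal": "<goal uuid or name>"
#   }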
import datetime from dateutil import tz import itertools from unittest import mock from urllib import parse as urlparse from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils from wsme import types as wtypes from watcher.api.controllers.v1 import audit as api_audit from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine import rpcapi as deapi from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.api import utils as api_utils from watcher.tests import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils def post_get_test_audit(**kw): audit = api_utils.audit_post_data(**kw) audit_template = db_utils.get_test_audit_template() goal = db_utils.get_test_goal() del_keys = ['goal_id', 'strategy_id'] del_keys.extend(kw.get('params_to_exclude', [])) add_keys = {'audit_template_uuid': audit_template['uuid'], 'goal': goal['uuid'], } if kw.get('use_named_goal'): add_keys['goal'] = 'TEST' for k in add_keys: audit[k] = kw.get(k, add_keys[k]) for k in del_keys: del audit[k] return audit def post_get_test_audit_with_predefined_strategy(**kw): spec = kw.pop('strategy_parameters_spec', {}) strategy_id = 2 strategy = db_utils.get_test_strategy(parameters_spec=spec, id=strategy_id) audit = api_utils.audit_post_data(**kw) audit_template = db_utils.get_test_audit_template( strategy_id=strategy['id']) del_keys = ['goal_id', 'strategy_id'] add_keys = {'audit_template_uuid': audit_template['uuid'], } for k in del_keys: del audit[k] for k in add_keys: audit[k] = kw.get(k, add_keys[k]) return audit class TestAuditObject(base.TestCase): def test_audit_init(self): audit_dict = api_utils.audit_post_data(audit_template_id=None, goal_id=None, strategy_id=None) del audit_dict['state'] audit = api_audit.Audit(**audit_dict) self.assertEqual(wtypes.Unset, audit.state) class TestListAudit(api_base.FunctionalTest): def setUp(self): super(TestListAudit, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) def test_empty(self): response = self.get_json('/audits') self.assertEqual([], response['audits']) def _assert_audit_fields(self, audit): audit_fields = ['audit_type', 'scope', 'state', 'goal_uuid', 'strategy_uuid'] for field in audit_fields: self.assertIn(field, audit) def test_one(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits') self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) def test_one_soft_deleted(self): audit = obj_utils.create_test_audit(self.context) audit.soft_delete() response = self.get_json('/audits', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) response = self.get_json('/audits') self.assertEqual([], response['audits']) def test_get_one(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits/%s' % audit['uuid']) self.assertEqual(audit.uuid, response['uuid']) self._assert_audit_fields(response) def test_get_one_soft_deleted(self): audit = obj_utils.create_test_audit(self.context) audit.soft_delete() response = self.get_json('/audits/%s' % audit['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit.uuid, response['uuid']) self._assert_audit_fields(response) 
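        # Without the X-Show-Deleted header the soft-deleted audit must not
        # be returned (comment added for clarity).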
response = self.get_json('/audits/%s' % audit['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits/detail') self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) def test_detail_soft_deleted(self): audit = obj_utils.create_test_audit(self.context) audit.soft_delete() response = self.get_json('/audits/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) response = self.get_json('/audits/detail') self.assertEqual([], response['audits']) def test_detail_against_single(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits/%s/detail' % audit['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): audit_list = [] for id_ in range(5): audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit_list.append(audit.uuid) response = self.get_json('/audits') self.assertEqual(len(audit_list), len(response['audits'])) uuids = [s['uuid'] for s in response['audits']] self.assertEqual(sorted(audit_list), sorted(uuids)) def test_many_without_soft_deleted(self): audit_list = [] for id_ in [1, 2, 3]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit_list.append(audit.uuid) for id_ in [4, 5]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit.soft_delete() response = self.get_json('/audits') self.assertEqual(3, len(response['audits'])) uuids = [s['uuid'] for s in response['audits']] self.assertEqual(sorted(audit_list), sorted(uuids)) def test_many_with_soft_deleted(self): audit_list = [] for id_ in [1, 2, 3]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit_list.append(audit.uuid) for id_ in [4, 5]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit.soft_delete() audit_list.append(audit.uuid) response = self.get_json('/audits', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['audits'])) uuids = [s['uuid'] for s in response['audits']] self.assertEqual(sorted(audit_list), sorted(uuids)) def test_many_with_sort_key_goal_uuid(self): goal_list = [] for id_ in range(5): goal = obj_utils.create_test_goal( self.context, name='gl{0}'.format(id_), uuid=utils.generate_uuid()) obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), goal_id=goal.id, name='My Audit {0}'.format(id_)) goal_list.append(goal.uuid) response = self.get_json('/audits/?sort_key=goal_uuid') self.assertEqual(5, len(response['audits'])) uuids = [s['goal_uuid'] for s in response['audits']] self.assertEqual(sorted(goal_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/audits?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_audit( self.context, id=1, uuid=uuid, name='My Audit {0}'.format(1)) response = self.get_json('/audits/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, 
len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue( self.validate_link(link['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) response = self.get_json('/audits/?limit=3') self.assertEqual(3, len(response['audits'])) next_marker = response['audits'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) response = self.get_json('/audits') self.assertEqual(3, len(response['audits'])) next_marker = response['audits'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.audit.State.CANCELLED response = self.get_json('/audits/%s' % self.audit.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertEqual(new_state, response['state']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) def test_replace_non_existent_audit(self): response = self.patch_json( '/audits/%s' % utils.generate_uuid(), [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, 'op': 'replace'}], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_add_ok(self): new_state = objects.audit.State.SUCCEEDED response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_int) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertEqual(new_state, response['state']) def test_add_non_existent_property(self): response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_ok(self): response = self.get_json('/audits/%s' % self.audit.uuid) self.assertIsNotNone(response['interval']) response = 
self.patch_json('/audits/%s' % self.audit.uuid, [{'path': '/interval', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.OK, response.status_code) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertIsNone(response['interval']) def test_remove_uuid(self): response = self.patch_json('/audits/%s' % self.audit.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property(self): response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) ALLOWED_TRANSITIONS = [ {"original_state": key, "new_state": value} for key, values in ( objects.audit.AuditStateTransitionManager.TRANSITIONS.items()) for value in values] class TestPatchStateTransitionDenied(api_base.FunctionalTest): STATES = [ ap_state for ap_state in objects.audit.State.__dict__ if not ap_state.startswith("_") ] scenarios = [ ( "%s -> %s" % (original_state, new_state), {"original_state": original_state, "new_state": new_state}, ) for original_state, new_state in list(itertools.product(STATES, STATES)) if original_state != new_state and {"original_state": original_state, "new_state": new_state} not in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionDenied, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context, state=self.original_state) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit def test_replace_denied(self): response = self.get_json('/audits/%s' % self.audit.uuid) self.assertNotEqual(self.new_state, response['state']) response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_code) self.assertTrue(response.json['error_message']) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertEqual(self.original_state, response['state']) class TestPatchStateTransitionOk(api_base.FunctionalTest): scenarios = [ ( "%s -> %s" % (transition["original_state"], transition["new_state"]), transition ) for transition in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionOk, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context, state=self.original_state) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, mock_utcnow): 
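        """Every transition listed in TRANSITIONS should succeed via PATCH.

        Docstring added for clarity: ``original_state`` and ``new_state``
        are injected per scenario from the class-level ``scenarios`` list.
        """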
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.get_json('/audits/%s' % self.audit.uuid)
        self.assertNotEqual(self.new_state, response['state'])

        response = self.patch_json(
            '/audits/%s' % self.audit.uuid,
            [{'path': '/state', 'value': self.new_state,
              'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(HTTPStatus.OK, response.status_code)

        response = self.get_json('/audits/%s' % self.audit.uuid)
        self.assertEqual(self.new_state, response['state'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)


class TestPost(api_base.FunctionalTest):

    def setUp(self):
        super(TestPost, self).setUp()
        obj_utils.create_test_goal(self.context)
        obj_utils.create_test_strategy(self.context)
        obj_utils.create_test_audit_template(self.context)

        p = mock.patch.object(db_api.BaseConnection, 'create_audit')
        self.mock_create_audit = p.start()
        self.mock_create_audit.side_effect = (
            self._simulate_rpc_audit_create)
        self.addCleanup(p.stop)

    def _simulate_rpc_audit_create(self, audit):
        audit.create()
        return audit

    @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_audit(self, mock_utcnow, mock_trigger_audit):
        mock_trigger_audit.return_value = mock.ANY
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        audit_dict = post_get_test_audit(
            state=objects.audit.State.PENDING,
            params_to_exclude=['uuid', 'state', 'interval', 'scope',
                               'next_run_time', 'hostname', 'goal'])

        response = self.post_json('/audits', audit_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(HTTPStatus.CREATED, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/audits/%s' % response.json['uuid']
        self.assertEqual(urlparse.urlparse(response.location).path,
                         expected_location)
        self.assertEqual(objects.audit.State.PENDING,
                         response.json['state'])
        self.assertNotIn('updated_at', response.json.keys())
        self.assertNotIn('deleted_at', response.json.keys())
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_audit_with_state_not_allowed(self, mock_utcnow,
                                                 mock_trigger_audit):
        mock_trigger_audit.return_value = mock.ANY
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        audit_dict = post_get_test_audit(state=objects.audit.State.SUCCEEDED)

        response = self.post_json('/audits', audit_dict, expect_errors=True)
        self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_audit_with_at_uuid_and_goal_specified(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        audit_dict = post_get_test_audit(
            state=objects.audit.State.PENDING,
            params_to_exclude=['uuid', 'state', 'interval', 'scope',
                               'next_run_time', 'hostname'])
        response = self.post_json('/audits', audit_dict, expect_errors=True)
        self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
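    # NOTE: an audit template UUID and an explicit goal are mutually
    # exclusive in the POST body, hence the BAD_REQUEST above (comment
    # added for clarity).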
@mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_goal(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'audit_template_uuid']) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_goal_without_strategy(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'audit_template_uuid', 'strategy']) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_named_goal(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'audit_template_uuid'], use_named_goal=True) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit_invalid_audit_template_uuid(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) # Make the audit template UUID some garbage value audit_dict['audit_template_uuid'] = ( '01234567-8910-1112-1314-151617181920') response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual("application/json", response.content_type) expected_error_msg = ('The audit template UUID or name specified is ' 'invalid') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_doesnt_contain_id(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) state = audit_dict['state'] del audit_dict['state'] with mock.patch.object(self.dbapi, 'create_audit', wraps=self.dbapi.create_audit) as cn_mock: response = self.post_json('/audits', audit_dict) self.assertEqual(state, response.json['state']) cn_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cn_mock.call_args[0][0]) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_generate_uuid(self, mock_trigger_audit): 
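        """A UUID should be generated server-side when none is supplied.

        (docstring added for clarity)
        """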
        mock_trigger_audit.return_value = mock.ANY
        audit_dict = post_get_test_audit(
            params_to_exclude=['uuid', 'state', 'interval', 'scope',
                               'next_run_time', 'hostname', 'goal'])

        response = self.post_json('/audits', audit_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(HTTPStatus.CREATED, response.status_int)
        self.assertEqual(objects.audit.State.PENDING,
                         response.json['state'])
        self.assertTrue(utils.is_uuid_like(response.json['uuid']))

    @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit')
    def test_create_continuous_audit_with_interval(self, mock_trigger_audit):
        mock_trigger_audit.return_value = mock.ANY
        audit_dict = post_get_test_audit(
            params_to_exclude=['uuid', 'state', 'scope',
                               'next_run_time', 'hostname', 'goal'])
        audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value
        audit_dict['interval'] = '1200'

        response = self.post_json('/audits', audit_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(HTTPStatus.CREATED, response.status_int)
        self.assertEqual(objects.audit.State.PENDING,
                         response.json['state'])
        self.assertEqual(audit_dict['interval'], response.json['interval'])
        self.assertTrue(utils.is_uuid_like(response.json['uuid']))

    @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit')
    def test_create_continuous_audit_with_cron_interval(self,
                                                        mock_trigger_audit):
        mock_trigger_audit.return_value = mock.ANY
        audit_dict = post_get_test_audit(
            params_to_exclude=['uuid', 'state', 'scope',
                               'next_run_time', 'hostname', 'goal'])
        audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value
        audit_dict['interval'] = '* * * * *'

        response = self.post_json('/audits', audit_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(HTTPStatus.CREATED, response.status_int)
        self.assertEqual(objects.audit.State.PENDING,
                         response.json['state'])
        self.assertEqual(audit_dict['interval'], response.json['interval'])
        self.assertTrue(utils.is_uuid_like(response.json['uuid']))

    @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit')
    def test_create_continuous_audit_with_wrong_interval(self,
                                                         mock_trigger_audit):
        mock_trigger_audit.return_value = mock.ANY
        audit_dict = post_get_test_audit(
            params_to_exclude=['uuid', 'state', 'scope',
                               'next_run_time', 'hostname', 'goal'])
        audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value
        audit_dict['interval'] = 'zxc'

        response = self.post_json('/audits', audit_dict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR,
                         response.status_int)
        # NOTE(dviroel): this error message check was shortened to try to
        # avoid future breakages. See bug #2089866 for more details.
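        # The truncated message below is assumed to come from the cron
        # parser used for interval validation (croniter raises "Exactly
        # 5 or 6 columns has to be specified for iterator expression.").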
expected_error_msg = ('columns has to be specified for iterator ' 'expression.') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_without_period(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) expected_error_msg = ('Interval of audit must be specified ' 'for CONTINUOUS.') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_oneshot_audit_with_period(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.ONESHOT.value response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) expected_error_msg = 'Interval of audit must not be set for ONESHOT.' self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) def test_create_audit_trigger_decision_engine(self): with mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') as de_mock: audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json('/audits', audit_dict) de_mock.assert_called_once_with(mock.ANY, response.json['uuid']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_uuid(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) del audit_dict['scope'] response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_parameters_no_predefined_strategy( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( parameters={'name': 'Tom'}, params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) expected_error_msg = ('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_parameters_no_schema( self, mock_trigger_audit): 
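        """Parameters without a parameters_spec on the strategy must fail.

        Docstring added for clarity: the predefined strategy is created
        with an empty spec here, so any supplied parameters are rejected.
        """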
mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit_with_predefined_strategy( parameters={'name': 'Tom'}) del audit_dict['uuid'] del audit_dict['state'] del audit_dict['interval'] del audit_dict['scope'] del audit_dict['next_run_time'] del audit_dict['hostname'] response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) expected_error_msg = ('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_parameter_not_allowed( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_template = self.prepare_audit_template_strategy_with_parameter() audit_dict = api_utils.audit_post_data( parameters={'fake1': 1, 'fake2': "hello"}) audit_dict['audit_template_uuid'] = audit_template['uuid'] del_keys = ['uuid', 'goal_id', 'strategy_id', 'state', 'interval', 'scope', 'next_run_time', 'hostname'] for k in del_keys: del audit_dict[k] response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual("application/json", response.content_type) expected_error_msg = 'Audit parameter fake2 are not allowed' self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called def prepare_audit_template_strategy_with_parameter(self): fake_spec = { "properties": { "fake1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 10.2, } } } template_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a67' strategy_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a68' template_name = 'my template' strategy_name = 'my strategy' strategy_id = 3 strategy = db_utils.get_test_strategy(parameters_spec=fake_spec, id=strategy_id, uuid=strategy_uuid, name=strategy_name) obj_utils.create_test_strategy(self.context, parameters_spec=fake_spec, id=strategy_id, uuid=strategy_uuid, name=strategy_name) obj_utils.create_test_audit_template(self.context, strategy_id=strategy_id, uuid=template_uuid, name='name') audit_template = db_utils.get_test_audit_template( strategy_id=strategy['id'], uuid=template_uuid, name=template_name) return audit_template @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit_with_name(self, mock_utcnow, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit( params_to_exclude=['state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) normal_name = 'this audit name is just for test' # long_name length exceeds 63 characters long_name = normal_name + audit_dict['uuid'] del audit_dict['uuid'] audit_dict['name'] = normal_name response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertEqual(normal_name, response.json['name']) audit_dict['name'] = long_name response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', 
response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertNotEqual(long_name, response.json['name']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_start_end_time( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY start_time = datetime.datetime(2018, 3, 1, 0, 0) end_time = datetime.datetime(2018, 4, 1, 0, 0) audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal'] ) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = '1200' audit_dict['start_time'] = str(start_time) audit_dict['end_time'] = str(end_time) response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.1'}) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertEqual(audit_dict['interval'], response.json['interval']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) return_start_time = timeutils.parse_isotime( response.json['start_time']) return_end_time = timeutils.parse_isotime( response.json['end_time']) iso_start_time = start_time.replace( tzinfo=tz.tzlocal()).astimezone(tz.tzutc()) iso_end_time = end_time.replace( tzinfo=tz.tzlocal()).astimezone(tz.tzutc()) self.assertEqual(iso_start_time, return_start_time) self.assertEqual(iso_end_time, return_end_time) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_start_end_time_incompatible_version( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY start_time = datetime.datetime(2018, 3, 1, 0, 0) end_time = datetime.datetime(2018, 4, 1, 0, 0) audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal'] ) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = '1200' audit_dict['start_time'] = str(start_time) audit_dict['end_time'] = str(end_time) response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.0'}, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.NOT_ACCEPTABLE, response.status_int) expected_error_msg = 'Request not acceptable.' 
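        # start_time/end_time are only accepted from API microversion 1.1
        # onwards, so the 1.0 request above must be rejected (comment added
        # for clarity).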
self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_force_false(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.2'}) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertFalse(response.json['force']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_force_true(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['force'] = True response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.2'}) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.CREATED, response.status_int) self.assertTrue(response.json['force']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit @mock.patch('oslo_utils.timeutils.utcnow') def test_delete_audit(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.audit.State.ONGOING self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) response = self.delete('/audits/%s' % self.audit.uuid, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) new_state = objects.audit.State.CANCELLED self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.delete('/audits/%s' % self.audit.uuid) response = self.get_json('/audits/%s' % self.audit.uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.context.show_deleted = True audit = objects.Audit.get_by_uuid(self.context, self.audit.uuid) return_deleted_at = \ audit['deleted_at'].strftime('%Y-%m-%dT%H:%M:%S.%f') self.assertEqual(test_time.strftime('%Y-%m-%dT%H:%M:%S.%f'), return_deleted_at) self.assertEqual(objects.audit.State.DELETED, audit['state']) def test_delete_audit_not_found(self): uuid = utils.generate_uuid() response = self.delete('/audits/%s' % uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestAuditPolicyEnforcement(api_base.FunctionalTest): def setUp(self): 
super(TestAuditPolicyEnforcement, self).setUp() obj_utils.create_test_goal(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "audit:get_all", self.get_json, '/audits', expect_errors=True) def test_policy_disallow_get_one(self): audit = obj_utils.create_test_audit(self.context) self._common_policy_check( "audit:get", self.get_json, '/audits/%s' % audit.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "audit:detail", self.get_json, '/audits/detail', expect_errors=True) def test_policy_disallow_update(self): audit = obj_utils.create_test_audit(self.context) self._common_policy_check( "audit:update", self.patch_json, '/audits/%s' % audit.uuid, [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) self._common_policy_check( "audit:create", self.post_json, '/audits', audit_dict, expect_errors=True) def test_policy_disallow_delete(self): audit = obj_utils.create_test_audit(self.context) self._common_policy_check( "audit:delete", self.delete, '/audits/%s' % audit.uuid, expect_errors=True) class TestAuditEnforcementWithAdminContext(TestListAudit, api_base.AdminRoleTest): def setUp(self): super(TestAuditEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "audit:create": "rule:default", "audit:delete": "rule:default", "audit:detail": "rule:default", "audit:get": "rule:default", "audit:get_all": "rule:default", "audit:update": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_data_model.py0000664000175000017500000000626300000000000024264 0ustar00zuulzuul00000000000000# Copyright 2019 ZTE corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
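# NOTE: these tests stub out the decision engine RPC API entirely; the
# request under test, assuming the data model API introduced in
# microversion 1.3, is simply:
#
#   GET /v1/data_model/?data_model_type=compute
#   OpenStack-API-Version: infra-optim 1.3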
from unittest import mock from http import HTTPStatus from oslo_serialization import jsonutils from watcher.decision_engine import rpcapi as deapi from watcher.tests.api import base as api_base class TestListDataModel(api_base.FunctionalTest): def setUp(self): super(TestListDataModel, self).setUp() p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI') self.mock_dcapi = p_dcapi.start() self.mock_dcapi().get_data_model_info.return_value = \ 'fake_response_value' self.addCleanup(p_dcapi.stop) def test_get_all(self): response = self.get_json( '/data_model/?data_model_type=compute', headers={'OpenStack-API-Version': 'infra-optim 1.3'}) self.assertEqual('fake_response_value', response) def test_get_all_not_acceptable(self): response = self.get_json( '/data_model/?data_model_type=compute', headers={'OpenStack-API-Version': 'infra-optim 1.2'}, expect_errors=True) self.assertEqual(HTTPStatus.NOT_ACCEPTABLE, response.status_int) class TestDataModelPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestDataModelPolicyEnforcement, self).setUp() p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI') self.mock_dcapi = p_dcapi.start() self.addCleanup(p_dcapi.stop) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "data_model:get_all", self.get_json, "/data_model/?data_model_type=compute", headers={'OpenStack-API-Version': 'infra-optim 1.3'}, expect_errors=True) class TestDataModelEnforcementWithAdminContext( TestListDataModel, api_base.AdminRoleTest): def setUp(self): super(TestDataModelEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "data_model:get_all": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_goals.py0000664000175000017500000001624700000000000023303 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
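# NOTE: goals are read-only through the REST API (they are assumed to be
# discovered from the loaded strategy plugins rather than created by
# users), so only the list/detail/get paths are exercised here.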
from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from urllib import parse as urlparse from watcher.common import utils from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListGoal(api_base.FunctionalTest): def _assert_goal_fields(self, goal): goal_fields = ['uuid', 'name', 'display_name', 'efficacy_specification'] for field in goal_fields: self.assertIn(field, goal) def test_one(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals') self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) self._assert_goal_fields(response['goals'][0]) def test_get_one_by_uuid(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals/%s' % goal.uuid) self.assertEqual(goal.uuid, response["uuid"]) self.assertEqual(goal.name, response["name"]) self._assert_goal_fields(response) def test_get_one_by_name(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json(urlparse.quote( '/goals/%s' % goal['name'])) self.assertEqual(goal.uuid, response['uuid']) self._assert_goal_fields(response) def test_get_one_soft_deleted(self): goal = obj_utils.create_test_goal(self.context) goal.soft_delete() response = self.get_json( '/goals/%s' % goal['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(goal.uuid, response['uuid']) self._assert_goal_fields(response) response = self.get_json( '/goals/%s' % goal['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals/detail') self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) self._assert_goal_fields(response['goals'][0]) def test_detail_against_single(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals/%s/detail' % goal.uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): goal_list = [] for idx in range(1, 6): goal = obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) goal_list.append(goal.uuid) response = self.get_json('/goals') self.assertGreater(len(response['goals']), 2) def test_many_without_soft_deleted(self): goal_list = [] for id_ in [1, 2, 3]: goal = obj_utils.create_test_goal( self.context, id=id_, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(id_)) goal_list.append(goal.uuid) for id_ in [4, 5]: goal = obj_utils.create_test_goal( self.context, id=id_, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(id_)) goal.soft_delete() response = self.get_json('/goals') self.assertEqual(3, len(response['goals'])) uuids = [s['uuid'] for s in response['goals']] self.assertEqual(sorted(goal_list), sorted(uuids)) def test_goals_collection_links(self): for idx in range(1, 6): obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) response = self.get_json('/goals/?limit=2') self.assertEqual(2, len(response['goals'])) def test_goals_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/goals') self.assertEqual(3, len(response['goals'])) def test_many_with_sort_key_uuid(self): goal_list = [] for idx in range(1, 6): goal = obj_utils.create_test_goal( self.context, 
id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) goal_list.append(goal.uuid) response = self.get_json('/goals/?sort_key=uuid') self.assertEqual(5, len(response['goals'])) uuids = [s['uuid'] for s in response['goals']] self.assertEqual(sorted(goal_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/goals?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) class TestGoalPolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "goal:get_all", self.get_json, '/goals', expect_errors=True) def test_policy_disallow_get_one(self): goal = obj_utils.create_test_goal(self.context) self._common_policy_check( "goal:get", self.get_json, '/goals/%s' % goal.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "goal:detail", self.get_json, '/goals/detail', expect_errors=True) class TestGoalPolicyEnforcementWithAdminContext(TestListGoal, api_base.AdminRoleTest): def setUp(self): super(TestGoalPolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "goal:detail": "rule:default", "goal:get_all": "rule:default", "goal:get_one": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_microversions.py0000664000175000017500000001160600000000000025072 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
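# NOTE: version negotiation is driven by the OpenStack-API-Version request
# header. An example exchange, using the constants defined below:
#
#   request:   OpenStack-API-Version: infra-optim 1.1
#   response:  openstack-api-version: infra-optim 1.1
#              openstack-api-minimum-version: <MIN_VER>
#              openstack-api-maximum-version: <MAX_VER>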
from http import HTTPStatus from watcher.api.controllers.v1 import versions from watcher.tests.api import base as api_base SERVICE_TYPE = 'infra-optim' H_MIN_VER = 'openstack-api-minimum-version' H_MAX_VER = 'openstack-api-maximum-version' H_RESP_VER = 'openstack-api-version' MIN_VER = versions.min_version_string() MAX_VER = versions.max_version_string() class TestMicroversions(api_base.FunctionalTest): controller_list_response = [ 'scoring_engines', 'audit_templates', 'audits', 'actions', 'action_plans', 'services'] def setUp(self): super(TestMicroversions, self).setUp() def test_wrong_major_version(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '10'])}, expect_errors=True, return_json=False) self.assertEqual('application/json', response.content_type) self.assertEqual(HTTPStatus.NOT_ACCEPTABLE, response.status_int) expected_error_msg = ('Invalid value for' ' OpenStack-API-Version header') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) def test_extend_initial_version_with_micro(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '1'])}, return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, MIN_VER])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_without_microversion(self): response = self.get_json('/', return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, MIN_VER])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_new_client_new_api(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '1.1'])}, return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, '1.1'])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_latest_microversion(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, 'latest'])}, return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, MAX_VER])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_unsupported_version(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '1.999'])}, expect_errors=True) self.assertEqual(HTTPStatus.NOT_ACCEPTABLE, response.status_int) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) expected_error_msg = ('Version 1.999 was requested but the minor ' 'version is not supported by this service. 
' 'The supported version range is') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_root.py0000664000175000017500000000127600000000000023155 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from watcher.tests.api import base as api_base class TestV1Routing(api_base.FunctionalTest): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_scoring_engines.py0000664000175000017500000001754100000000000025350 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
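"""Tests for the /scoring_engines API resources.

Scoring engines are read-only resources, so the tests below only cover
listing, detail and single-item lookups together with the usual ``limit``,
``sort_key`` and soft-delete query behaviour. A short sketch of the calls
exercised, using the same functional-test helper (values illustrative)::

    response = self.get_json('/scoring_engines')              # plain list
    response = self.get_json('/scoring_engines/detail')       # + metainfo
    response = self.get_json('/scoring_engines/?limit=2')     # paginated
    response = self.get_json('/scoring_engines/?sort_key=uuid')
"""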
from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from watcher.common import utils from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListScoringEngine(api_base.FunctionalTest): def _assert_scoring_engine_fields(self, scoring_engine): scoring_engine_fields = ['uuid', 'name', 'description'] for field in scoring_engine_fields: self.assertIn(field, scoring_engine) def test_one(self): scoring_engine = obj_utils.create_test_scoring_engine(self.context) response = self.get_json('/scoring_engines') self.assertEqual( scoring_engine.name, response['scoring_engines'][0]['name']) self._assert_scoring_engine_fields(response['scoring_engines'][0]) def test_get_one_soft_deleted(self): scoring_engine = obj_utils.create_test_scoring_engine(self.context) scoring_engine.soft_delete() response = self.get_json( '/scoring_engines/%s' % scoring_engine['name'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(scoring_engine.name, response['name']) self._assert_scoring_engine_fields(response) response = self.get_json( '/scoring_engines/%s' % scoring_engine['name'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): obj_utils.create_test_goal(self.context) scoring_engine = obj_utils.create_test_scoring_engine(self.context) response = self.get_json('/scoring_engines/detail') self.assertEqual( scoring_engine.name, response['scoring_engines'][0]['name']) self._assert_scoring_engine_fields(response['scoring_engines'][0]) for scoring_engine in response['scoring_engines']: self.assertTrue( all(val is not None for key, val in scoring_engine.items() if key in ['uuid', 'name', 'description', 'metainfo'])) def test_detail_against_single(self): scoring_engine = obj_utils.create_test_scoring_engine(self.context) response = self.get_json( '/scoring_engines/%s/detail' % scoring_engine.id, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): scoring_engine_list = [] for idx in range(1, 6): scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) scoring_engine_list.append(scoring_engine.name) response = self.get_json('/scoring_engines') self.assertEqual(5, len(response['scoring_engines'])) for scoring_engine in response['scoring_engines']: self.assertTrue( all(val is not None for key, val in scoring_engine.items() if key in ['name', 'description', 'metainfo'])) def test_many_without_soft_deleted(self): scoring_engine_list = [] for id_ in [1, 2, 3]: scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=id_, uuid=utils.generate_uuid(), name=str(id_), description='SE_{0}'.format(id_)) scoring_engine_list.append(scoring_engine.name) for id_ in [4, 5]: scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=id_, uuid=utils.generate_uuid(), name=str(id_), description='SE_{0}'.format(id_)) scoring_engine.soft_delete() response = self.get_json('/scoring_engines') self.assertEqual(3, len(response['scoring_engines'])) names = [s['name'] for s in response['scoring_engines']] self.assertEqual(sorted(scoring_engine_list), sorted(names)) def test_scoring_engines_collection_links(self): for idx in range(1, 6): obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) response = self.get_json('/scoring_engines/?limit=2') 
self.assertEqual(2, len(response['scoring_engines'])) def test_scoring_engines_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/scoring_engines') self.assertEqual(3, len(response['scoring_engines'])) def test_many_with_sort_key_uuid(self): scoring_engine_list = [] for idx in range(1, 6): scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) scoring_engine_list.append(scoring_engine.uuid) response = self.get_json('/scoring_engines/?sort_key=uuid') self.assertEqual(5, len(response['scoring_engines'])) uuids = [s['uuid'] for s in response['scoring_engines']] self.assertEqual(sorted(scoring_engine_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/scoring_engines?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) class TestScoringEnginePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "scoring_engine:get_all", self.get_json, '/scoring_engines', expect_errors=True) def test_policy_disallow_get_one(self): se = obj_utils.create_test_scoring_engine(self.context) self._common_policy_check( "scoring_engine:get", self.get_json, '/scoring_engines/%s' % se.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "scoring_engine:detail", self.get_json, '/scoring_engines/detail', expect_errors=True) class TestScoringEnginePolicyEnforcementWithAdminContext( TestListScoringEngine, api_base.AdminRoleTest): def setUp(self): super(TestScoringEnginePolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "scoring_engine:detail": "rule:default", "scoring_engine:get": "rule:default", "scoring_engine:get_all": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_services.py0000664000175000017500000001762400000000000024015 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
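"""Tests for the /services API resources.

The services endpoint reports the Watcher services known to the cluster
(id, name, host and status). Unlike most Watcher resources, services are
addressed by integer id or by name rather than by UUID, which is why both
lookup forms appear below (values illustrative)::

    response = self.get_json('/services/%s' % service.id)
    response = self.get_json('/services/%s' % service.name)
"""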
from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from urllib import parse as urlparse from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListService(api_base.FunctionalTest): def _assert_service_fields(self, service): service_fields = ['id', 'name', 'host', 'status'] for field in service_fields: self.assertIn(field, service) def test_one(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services') self.assertEqual(service.id, response['services'][0]["id"]) self._assert_service_fields(response['services'][0]) def test_get_one_by_id(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services/%s' % service.id) self.assertEqual(service.id, response["id"]) self.assertEqual(service.name, response["name"]) self._assert_service_fields(response) def test_get_one_by_name(self): service = obj_utils.create_test_service(self.context) response = self.get_json(urlparse.quote( '/services/%s' % service['name'])) self.assertEqual(service.id, response['id']) self._assert_service_fields(response) def test_get_one_soft_deleted(self): service = obj_utils.create_test_service(self.context) service.soft_delete() response = self.get_json( '/services/%s' % service['id'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(service.id, response['id']) self._assert_service_fields(response) response = self.get_json( '/services/%s' % service['id'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services/detail') self.assertEqual(service.id, response['services'][0]["id"]) self._assert_service_fields(response['services'][0]) for service in response['services']: self.assertTrue( all(val is not None for key, val in service.items() if key in ['id', 'name', 'host', 'status']) ) def test_detail_against_single(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services/%s/detail' % service.id, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): service_list = [] for idx in range(1, 4): service = obj_utils.create_test_service( self.context, id=idx, host='CONTROLLER1', name='SERVICE_{0}'.format(idx)) service_list.append(service.id) for idx in range(1, 4): service = obj_utils.create_test_service( self.context, id=3+idx, host='CONTROLLER2', name='SERVICE_{0}'.format(idx)) service_list.append(service.id) response = self.get_json('/services') self.assertEqual(6, len(response['services'])) for service in response['services']: self.assertTrue( all(val is not None for key, val in service.items() if key in ['id', 'name', 'host', 'status'])) def test_many_without_soft_deleted(self): service_list = [] for id_ in [1, 2, 3]: service = obj_utils.create_test_service( self.context, id=id_, host='CONTROLLER', name='SERVICE_{0}'.format(id_)) service_list.append(service.id) for id_ in [4, 5]: service = obj_utils.create_test_service( self.context, id=id_, host='CONTROLLER', name='SERVICE_{0}'.format(id_)) service.soft_delete() response = self.get_json('/services') self.assertEqual(3, len(response['services'])) ids = [s['id'] for s in response['services']] self.assertEqual(sorted(service_list), sorted(ids)) def test_services_collection_links(self): for idx in range(1, 6): obj_utils.create_test_service( self.context, id=idx, host='CONTROLLER', 
name='SERVICE_{0}'.format(idx)) response = self.get_json('/services/?limit=2') self.assertEqual(2, len(response['services'])) def test_services_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_service( self.context, id=idx, host='CONTROLLER', name='SERVICE_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/services') self.assertEqual(3, len(response['services'])) def test_many_with_sort_key_name(self): service_list = [] for id_ in range(1, 4): service = obj_utils.create_test_service( self.context, id=id_, host='CONTROLLER', name='SERVICE_{0}'.format(id_)) service_list.append(service.name) response = self.get_json('/services/?sort_key=name') self.assertEqual(3, len(response['services'])) names = [s['name'] for s in response['services']] self.assertEqual(sorted(service_list), names) def test_sort_key_validation(self): response = self.get_json( '/services?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) class TestServicePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "service:get_all", self.get_json, '/services', expect_errors=True) def test_policy_disallow_get_one(self): service = obj_utils.create_test_service(self.context) self._common_policy_check( "service:get", self.get_json, '/services/%s' % service.id, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "service:detail", self.get_json, '/services/detail', expect_errors=True) class TestServiceEnforcementWithAdminContext(TestListService, api_base.AdminRoleTest): def setUp(self): super(TestServiceEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "service:detail": "rule:default", "service:get": "rule:default", "service:get_all": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_strategies.py0000664000175000017500000002766700000000000024350 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
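"""Tests for the /strategies API resources.

Strategies hang off goals, so besides the usual list/detail/lookup calls
the API accepts a ``goal`` query filter matching either the goal's UUID or
its name, plus a per-strategy ``state`` sub-resource reporting requirement
checks (datasource, metrics and cluster data model availability). A sketch
of the two forms used below (values illustrative)::

    response = self.get_json('/strategies/?goal=%s' % goal_uuid_or_name)
    response = self.get_json('/strategies/%s/state' % strategy_uuid)
"""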
from unittest import mock from urllib import parse as urlparse from http import HTTPStatus from oslo_config import cfg from oslo_serialization import jsonutils from watcher.common import utils from watcher.decision_engine import rpcapi as deapi from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListStrategy(api_base.FunctionalTest): def setUp(self): super(TestListStrategy, self).setUp() self.fake_goal = obj_utils.create_test_goal( self.context, uuid=utils.generate_uuid()) def _assert_strategy_fields(self, strategy): strategy_fields = ['uuid', 'name', 'display_name', 'goal_uuid'] for field in strategy_fields: self.assertIn(field, strategy) @mock.patch.object(deapi.DecisionEngineAPI, 'get_strategy_info') def test_state(self, mock_strategy_info): strategy = obj_utils.create_test_strategy(self.context) mock_state = [ {"type": "Datasource", "mandatory": True, "comment": "", "state": "gnocchi: True"}, {"type": "Metrics", "mandatory": False, "comment": "", "state": [{"compute.node.cpu.percent": "available"}, {"cpu_util": "available"}]}, {"type": "CDM", "mandatory": True, "comment": "", "state": [{"compute_model": "available"}, {"storage_model": "not available"}]}, {"type": "Name", "mandatory": "", "comment": "", "state": strategy.name} ] mock_strategy_info.return_value = mock_state response = self.get_json('/strategies/%s/state' % strategy.uuid) strategy_name = [requirement["state"] for requirement in response if requirement["type"] == "Name"][0] self.assertEqual(strategy.name, strategy_name) def test_one(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies') self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) self._assert_strategy_fields(response['strategies'][0]) def test_get_one_by_uuid(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies/%s' % strategy.uuid) self.assertEqual(strategy.uuid, response["uuid"]) self.assertEqual(strategy.name, response["name"]) self._assert_strategy_fields(response) def test_get_one_by_name(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json(urlparse.quote( '/strategies/%s' % strategy['name'])) self.assertEqual(strategy.uuid, response['uuid']) self._assert_strategy_fields(response) def test_get_one_soft_deleted(self): strategy = obj_utils.create_test_strategy(self.context) strategy.soft_delete() response = self.get_json( '/strategies/%s' % strategy['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(strategy.uuid, response['uuid']) self._assert_strategy_fields(response) response = self.get_json( '/strategies/%s' % strategy['uuid'], expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_detail(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies/detail') self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) self._assert_strategy_fields(response['strategies'][0]) for strategy in response['strategies']: self.assertTrue( all(val is not None for key, val in strategy.items() if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) def test_detail_against_single(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies/%s/detail' % strategy.uuid, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_many(self): strategy_list = [] for idx in range(1, 6): strategy = 
obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) strategy_list.append(strategy.uuid) response = self.get_json('/strategies') self.assertEqual(5, len(response['strategies'])) for strategy in response['strategies']: self.assertTrue( all(val is not None for key, val in strategy.items() if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) def test_many_without_soft_deleted(self): strategy_list = [] for id_ in [1, 2, 3]: strategy = obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(id_)) strategy_list.append(strategy.uuid) for id_ in [4, 5]: strategy = obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(id_)) strategy.soft_delete() response = self.get_json('/strategies') self.assertEqual(3, len(response['strategies'])) uuids = [s['uuid'] for s in response['strategies']] self.assertEqual(sorted(strategy_list), sorted(uuids)) def test_strategies_collection_links(self): for idx in range(1, 6): obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) response = self.get_json('/strategies/?limit=2') self.assertEqual(2, len(response['strategies'])) def test_strategies_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/strategies') self.assertEqual(3, len(response['strategies'])) def test_filter_by_goal_uuid(self): goal1 = obj_utils.create_test_goal( self.context, id=2, uuid=utils.generate_uuid(), name='My_Goal 1') goal2 = obj_utils.create_test_goal( self.context, id=3, uuid=utils.generate_uuid(), name='My Goal 2') for id_ in range(1, 3): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal1['id']) for id_ in range(3, 5): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal2['id']) response = self.get_json('/strategies/?goal=%s' % goal1['uuid']) strategies = response['strategies'] self.assertEqual(2, len(strategies)) for strategy in strategies: self.assertEqual(goal1['uuid'], strategy['goal_uuid']) def test_filter_by_goal_name(self): goal1 = obj_utils.create_test_goal( self.context, id=2, uuid=utils.generate_uuid(), name='My_Goal 1') goal2 = obj_utils.create_test_goal( self.context, id=3, uuid=utils.generate_uuid(), name='My Goal 2') for id_ in range(1, 3): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal1['id']) for id_ in range(3, 5): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal2['id']) response = self.get_json('/strategies/?goal=%s' % goal1['name']) strategies = response['strategies'] self.assertEqual(2, len(strategies)) for strategy in strategies: self.assertEqual(goal1['uuid'], strategy['goal_uuid']) def test_many_with_sort_key_goal_uuid(self): goals_uuid_list = [] for idx in range(1, 6): strategy = obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) goals_uuid_list.append(strategy.goal.uuid) response = self.get_json('/strategies/?sort_key=goal_uuid') self.assertEqual(5, len(response['strategies'])) goal_uuids = [s['goal_uuid'] for s in 
response['strategies']] self.assertEqual(sorted(goals_uuid_list), goal_uuids) def test_sort_key_validation(self): response = self.get_json( '/strategies?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) class TestStrategyPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestStrategyPolicyEnforcement, self).setUp() self.fake_goal = obj_utils.create_test_goal( self.context, uuid=utils.generate_uuid()) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "strategy:get_all", self.get_json, '/strategies', expect_errors=True) def test_policy_disallow_get_one(self): strategy = obj_utils.create_test_strategy(self.context) self._common_policy_check( "strategy:get", self.get_json, '/strategies/%s' % strategy.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "strategy:detail", self.get_json, '/strategies/detail', expect_errors=True) def test_policy_disallow_state(self): strategy = obj_utils.create_test_strategy(self.context) self._common_policy_check( "strategy:get", self.get_json, '/strategies/%s/state' % strategy.uuid, expect_errors=True) class TestStrategyEnforcementWithAdminContext( TestListStrategy, api_base.AdminRoleTest): def setUp(self): super(TestStrategyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "strategy:detail": "rule:default", "strategy:get": "rule:default", "strategy:get_all": "rule:default", "strategy:state": "rule:default"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_types.py0000664000175000017500000002214100000000000023330 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
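"""Tests for the custom WSME types used by the v1 API controllers.

The JSON-patch tests drive a tiny throwaway WSME application (``MyRoot``
below) rather than the real API, so the patch semantics can be checked in
isolation. A valid patch document is just a list of RFC 6902 operations,
for example::

    [{'path': '/str', 'op': 'replace', 'value': 'bar'},
     {'path': '/extra/foo', 'op': 'remove'}]
"""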
from unittest import mock import webtest import wsme from wsme import types as wtypes from http import HTTPStatus from watcher.api.controllers.v1 import types from watcher.common import exception from watcher.common import utils from watcher.tests import base class TestUuidType(base.TestCase): def test_valid_uuid(self): test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' self.assertEqual(test_uuid, types.UuidType.validate(test_uuid)) def test_invalid_uuid(self): self.assertRaises(exception.InvalidUUID, types.UuidType.validate, 'invalid-uuid') class TestNameType(base.TestCase): def test_valid_name(self): test_name = 'hal-9000' self.assertEqual(test_name, types.NameType.validate(test_name)) def test_invalid_name(self): self.assertRaises(exception.InvalidName, types.NameType.validate, '-this is not valid-') class TestUuidOrNameType(base.TestCase): @mock.patch.object(utils, 'is_uuid_like') @mock.patch.object(utils, 'is_hostname_safe') def test_valid_uuid(self, host_mock, uuid_mock): test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' host_mock.return_value = False uuid_mock.return_value = True self.assertTrue(types.UuidOrNameType.validate(test_uuid)) uuid_mock.assert_called_once_with(test_uuid) @mock.patch.object(utils, 'is_uuid_like') @mock.patch.object(utils, 'is_hostname_safe') def test_valid_name(self, host_mock, uuid_mock): test_name = 'dc16-database5' uuid_mock.return_value = False host_mock.return_value = True self.assertTrue(types.UuidOrNameType.validate(test_name)) host_mock.assert_called_once_with(test_name) def test_invalid_uuid_or_name(self): self.assertRaises(exception.InvalidUuidOrName, types.UuidOrNameType.validate, 'inval#uuid%or*name') class MyPatchType(types.JsonPatchType): """Helper class for TestJsonPatchType tests.""" @staticmethod def mandatory_attrs(): return ['/mandatory'] @staticmethod def internal_attrs(): return ['/internal'] class MyRoot(wsme.WSRoot): """Helper class for TestJsonPatchType tests.""" @wsme.expose([wsme.types.text], body=[MyPatchType]) @wsme.validate([MyPatchType]) def test(self, patch): return patch class TestJsonPatchType(base.TestCase): def setUp(self): super(TestJsonPatchType, self).setUp() self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) def _patch_json(self, params, expect_errors=False): return self.app.patch_json( '/test', params=params, headers={'Accept': 'application/json'}, expect_errors=expect_errors ) def test_valid_patches(self): valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, {'path': '/str', 'op': 'replace', 'value': 'bar'}, {'path': '/bool', 'op': 'add', 'value': True}, {'path': '/int', 'op': 'add', 'value': 1}, {'path': '/float', 'op': 'add', 'value': 0.123}, {'path': '/list', 'op': 'add', 'value': [1, 2]}, {'path': '/none', 'op': 'add', 'value': None}, {'path': '/empty_dict', 'op': 'add', 'value': {}}, {'path': '/empty_list', 'op': 'add', 'value': []}, {'path': '/dict', 'op': 'add', 'value': {'cat': 'meow'}}] ret = self._patch_json(valid_patches, False) self.assertEqual(HTTPStatus.OK, ret.status_int) self.assertEqual(valid_patches, ret.json) def test_cannot_update_internal_attr(self): patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] ret = self._patch_json(patch, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_cannot_update_internal_dict_attr(self): patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] ret = self._patch_json(patch, True) 
self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_mandatory_attr(self): patch = [{'op': 'replace', 'path': '/mandatory', 'value': 'foo'}] ret = self._patch_json(patch, False) self.assertEqual(HTTPStatus.OK, ret.status_int) self.assertEqual(patch, ret.json) def test_cannot_remove_mandatory_attr(self): patch = [{'op': 'remove', 'path': '/mandatory'}] ret = self._patch_json(patch, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_missing_required_fields_path(self): missing_path = [{'op': 'remove'}] ret = self._patch_json(missing_path, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_missing_required_fields_op(self): missing_op = [{'path': '/foo'}] ret = self._patch_json(missing_op, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_invalid_op(self): patch = [{'path': '/foo', 'op': 'invalid'}] ret = self._patch_json(patch, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_invalid_path(self): patch = [{'path': 'invalid-path', 'op': 'remove'}] ret = self._patch_json(patch, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_cannot_add_with_no_value(self): patch = [{'path': '/extra/foo', 'op': 'add'}] ret = self._patch_json(patch, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_cannot_replace_with_no_value(self): patch = [{'path': '/foo', 'op': 'replace'}] ret = self._patch_json(patch, True) self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_int) self.assertTrue(ret.json['faultstring']) class TestBooleanType(base.TestCase): def test_valid_true_values(self): v = types.BooleanType() self.assertTrue(v.validate("true")) self.assertTrue(v.validate("TRUE")) self.assertTrue(v.validate("True")) self.assertTrue(v.validate("t")) self.assertTrue(v.validate("1")) self.assertTrue(v.validate("y")) self.assertTrue(v.validate("yes")) self.assertTrue(v.validate("on")) def test_valid_false_values(self): v = types.BooleanType() self.assertFalse(v.validate("false")) self.assertFalse(v.validate("FALSE")) self.assertFalse(v.validate("False")) self.assertFalse(v.validate("f")) self.assertFalse(v.validate("0")) self.assertFalse(v.validate("n")) self.assertFalse(v.validate("no")) self.assertFalse(v.validate("off")) def test_invalid_value(self): v = types.BooleanType() self.assertRaises(exception.Invalid, v.validate, "invalid-value") self.assertRaises(exception.Invalid, v.validate, "01") class TestJsonType(base.TestCase): def test_valid_values(self): vt = types.jsontype value = vt.validate("hello") self.assertEqual("hello", value) value = vt.validate(10) self.assertEqual(10, value) value = vt.validate(0.123) self.assertEqual(0.123, value) value = vt.validate(True) self.assertTrue(value) value = vt.validate([1, 2, 3]) self.assertEqual([1, 2, 3], value) value = vt.validate({'foo': 'bar'}) self.assertEqual({'foo': 'bar'}, value) value = vt.validate(None) self.assertIsNone(value) def test_invalid_values(self): vt = types.jsontype self.assertRaises(exception.Invalid, vt.validate, object()) def test_apimultitype_tostring(self): vts = str(types.jsontype) self.assertIn(str(wtypes.text), vts) self.assertIn(str(int), vts) self.assertIn(str(float), vts) self.assertIn(str(types.BooleanType), vts) 
self.assertIn(str(list), vts) self.assertIn(str(dict), vts) self.assertIn(str(None), vts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_utils.py0000664000175000017500000000425300000000000023330 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import wsme from oslo_config import cfg from watcher.api.controllers.v1 import utils from watcher.tests import base CONF = cfg.CONF class TestApiUtils(base.TestCase): def test_validate_limit(self): limit = utils.validate_limit(10) self.assertEqual(10, limit) # max limit limit = utils.validate_limit(999999999) self.assertEqual(CONF.api.max_limit, limit) # negative self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) # zero self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) def test_validate_sort_dir(self): # if sort_dir is valid, nothing should happen try: utils.validate_sort_dir('asc') except Exception as exc: self.fail(exc) # invalid sort_dir parameter self.assertRaises(wsme.exc.ClientSideError, utils.validate_sort_dir, 'fake-sort') def test_validate_search_filters(self): allowed_fields = ["allowed", "authorized"] test_filters = {"allowed": 1, "authorized": 2} try: utils.validate_search_filters(test_filters, allowed_fields) except Exception as exc: self.fail(exc) def test_validate_search_filters_with_invalid_key(self): allowed_fields = ["allowed", "authorized"] test_filters = {"allowed": 1, "unauthorized": 2} self.assertRaises( wsme.exc.ClientSideError, utils.validate_search_filters, test_filters, allowed_fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/api/v1/test_webhooks.py0000664000175000017500000000611400000000000024007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
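"""Tests for the /webhooks API resources.

Webhooks let an external system trigger an EVENT-type audit: the call is a
bare POST against the audit's UUID and requires API microversion 1.4 or
later. A minimal sketch with the functional-test helper, mirroring the
headers used below::

    response = self.post_json(
        '/webhooks/%s' % audit_uuid, {},
        headers={'OpenStack-API-Version': 'infra-optim 1.4'})
"""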
from unittest import mock from http import HTTPStatus from watcher.decision_engine import rpcapi as deapi from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_trigger_audit(self, mock_trigger_audit): audit = obj_utils.create_test_audit( self.context, audit_type=objects.audit.AuditType.EVENT.value) response = self.post_json( '/webhooks/%s' % audit['uuid'], {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) mock_trigger_audit.assert_called_once_with( mock.ANY, audit['uuid']) def test_trigger_audit_with_no_audit(self): response = self.post_json( '/webhooks/no-audit', {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}, expect_errors=True) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_trigger_audit_with_not_allowed_audittype(self): audit = obj_utils.create_test_audit(self.context) response = self.post_json( '/webhooks/%s' % audit['uuid'], {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_trigger_audit_with_not_allowed_audit_state(self): audit = obj_utils.create_test_audit( self.context, audit_type=objects.audit.AuditType.EVENT.value, state=objects.audit.State.FAILED) response = self.post_json( '/webhooks/%s' % audit['uuid'], {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}, expect_errors=True) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/0000775000175000017500000000000000000000000021110 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/__init__.py0000664000175000017500000000000000000000000023207 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/action_plan/0000775000175000017500000000000000000000000023377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/action_plan/__init__.py0000664000175000017500000000000000000000000025476 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/action_plan/test_default_action_handler.py0000664000175000017500000001256000000000000031472 0ustar00zuulzuul00000000000000# Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); 
# you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.applier.action_plan import default from watcher.applier import default as ap_applier from watcher.common import exception from watcher import notifications from watcher import objects from watcher.objects import action_plan as ap_objects from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class TestDefaultActionPlanHandler(base.DbTestCase): class FakeApplierException(Exception): pass def setUp(self): super(TestDefaultActionPlanHandler, self).setUp() p_action_plan_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_plan_notifications = p_action_plan_notifications.start() self.addCleanup(p_action_plan_notifications.stop) obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit( self.context, strategy_id=self.strategy.id) self.action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id) self.action = obj_utils.create_test_action( self.context, action_plan_id=self.action_plan.id, action_type='nop', input_parameters={'message': 'hello World'}) @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_launch_action_plan(self, m_get_action_plan): m_get_action_plan.return_value = self.action_plan command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() expected_calls = [ mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, phase=objects.fields.NotificationPhase.END)] self.assertEqual(ap_objects.State.SUCCEEDED, self.action_plan.state) self.assertEqual( expected_calls, self.m_action_plan_notifications .send_action_notification .call_args_list) @mock.patch.object(ap_applier.DefaultApplier, "execute") @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_launch_action_plan_with_error(self, m_get_action_plan, m_execute): m_get_action_plan.return_value = self.action_plan m_execute.side_effect = self.FakeApplierException command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() expected_calls = [ mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, priority=objects.fields.NotificationPriority.ERROR, phase=objects.fields.NotificationPhase.ERROR)] self.assertEqual(ap_objects.State.FAILED, self.action_plan.state) self.assertEqual( expected_calls, self.m_action_plan_notifications .send_action_notification .call_args_list) @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_cancel_action_plan(self, m_get_action_plan): m_get_action_plan.return_value = self.action_plan self.action_plan.state 
= ap_objects.State.CANCELLED self.action_plan.save() command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() action = self.action.get_by_uuid(self.context, self.action.uuid) self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) self.assertEqual(objects.action.State.CANCELLED, action.state) @mock.patch.object(ap_applier.DefaultApplier, "execute") @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_cancel_action_plan_with_exception(self, m_get_action_plan, m_execute): m_get_action_plan.return_value = self.action_plan m_execute.side_effect = exception.ActionPlanCancelled( self.action_plan.uuid) command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/actions/0000775000175000017500000000000000000000000022550 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/__init__.py0000664000175000017500000000000000000000000024647 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/actions/loading/0000775000175000017500000000000000000000000024165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/loading/__init__.py0000664000175000017500000000000000000000000026264 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/loading/test_default_actions_loader.py0000664000175000017500000000212500000000000032270 0ustar00zuulzuul00000000000000# Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
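"""Tests for the default applier action loader.

The loader discovers every available action plugin and instantiates it;
each loaded object must be a BaseAction. The whole contract fits in a few
lines::

    loader = default.DefaultActionLoader()
    for endpoint in loader.list_available():
        action = loader.load(endpoint)  # a BaseAction instance
"""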
from watcher.applier.actions import base as abase from watcher.applier.loading import default from watcher.tests import base class TestDefaultActionLoader(base.TestCase): def setUp(self): super(TestDefaultActionLoader, self).setUp() self.loader = default.DefaultActionLoader() def test_endpoints(self): for endpoint in self.loader.list_available(): loaded = self.loader.load(endpoint) self.assertIsNotNone(loaded) self.assertIsInstance(loaded, abase.BaseAction) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/test_change_node_power_state.py0000664000175000017500000001415300000000000031033 0ustar00zuulzuul00000000000000# Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import jsonschema from watcher.applier.actions import base as baction from watcher.applier.actions import change_node_power_state from watcher.common.metal_helper import constants as m_constants from watcher.common.metal_helper import factory as m_helper_factory from watcher.tests import base from watcher.tests.decision_engine import fake_metal_helper COMPUTE_NODE = "compute-1" class TestChangeNodePowerState(base.TestCase): def setUp(self): super(TestChangeNodePowerState, self).setUp() p_m_factory = mock.patch.object(m_helper_factory, 'get_helper') m_factory = p_m_factory.start() self._metal_helper = m_factory.return_value self.addCleanup(p_m_factory.stop) # Let's avoid unnecessary sleep calls while running the test. 
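# (execute() re-reads the node's power state until it reaches the target -
# that is why the tests below feed get_node a side_effect list - so the
# stubbed sleep keeps that polling loop instantaneous.)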
p_sleep = mock.patch('time.sleep') p_sleep.start() self.addCleanup(p_sleep.stop) self.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, "state": m_constants.PowerState.ON.value, } self.action = change_node_power_state.ChangeNodePowerState( mock.Mock()) self.action.input_parameters = self.input_parameters def test_parameters_down(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, self.action.STATE: m_constants.PowerState.OFF.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_up(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, self.action.STATE: m_constants.PowerState.ON.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_wrong_state(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, self.action.STATE: 'error'} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_resource_id_empty(self): self.action.input_parameters = { self.action.STATE: m_constants.PowerState.ON.value, } self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_applies_add_extra(self): self.action.input_parameters = {"extra": "failed"} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_change_service_state_pre_condition(self): try: self.action.pre_condition() except Exception as exc: self.fail(exc) def test_change_node_state_post_condition(self): try: self.action.post_condition() except Exception as exc: self.fail(exc) def test_execute_node_service_state_with_poweron_target(self): self.action.input_parameters["state"] = ( m_constants.PowerState.ON.value) mock_nodes = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON) ] self._metal_helper.get_node.side_effect = mock_nodes result = self.action.execute() self.assertTrue(result) mock_nodes[0].set_power_state.assert_called_once_with( m_constants.PowerState.ON.value) def test_execute_change_node_state_with_poweroff_target(self): self.action.input_parameters["state"] = ( m_constants.PowerState.OFF.value) mock_nodes = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF) ] self._metal_helper.get_node.side_effect = mock_nodes result = self.action.execute() self.assertTrue(result) mock_nodes[0].set_power_state.assert_called_once_with( m_constants.PowerState.OFF.value) def test_revert_change_node_state_with_poweron_target(self): self.action.input_parameters["state"] = ( m_constants.PowerState.ON.value) mock_nodes = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF) ] self._metal_helper.get_node.side_effect = mock_nodes self.action.revert() mock_nodes[0].set_power_state.assert_called_once_with( m_constants.PowerState.OFF.value) def test_revert_change_node_state_with_poweroff_target(self): self.action.input_parameters["state"] = ( m_constants.PowerState.OFF.value) mock_nodes = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF), fake_metal_helper.get_mock_metal_node( 
power_state=m_constants.PowerState.ON) ] self._metal_helper.get_node.side_effect = mock_nodes self.action.revert() mock_nodes[0].set_power_state.assert_called_once_with( m_constants.PowerState.ON.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/test_change_nova_service_state.py0000664000175000017500000001204200000000000031350 0ustar00zuulzuul00000000000000# Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import jsonschema from watcher.applier.actions import base as baction from watcher.applier.actions import change_nova_service_state from watcher.common import clients from watcher.common import nova_helper from watcher.decision_engine.model import element from watcher.tests import base class TestChangeNovaServiceState(base.TestCase): def setUp(self): super(TestChangeNovaServiceState, self).setUp() self.m_osc_cls = mock.Mock() self.m_helper_cls = mock.Mock() self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_helper_cls.return_value = self.m_helper self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_helper_cls) m_openstack_clients.start() m_nova_helper.start() self.addCleanup(m_openstack_clients.stop) self.addCleanup(m_nova_helper.stop) self.input_parameters = { "resource_name": "compute-1", "state": element.ServiceState.ENABLED.value, } self.action = change_nova_service_state.ChangeNovaServiceState( mock.Mock()) self.action.input_parameters = self.input_parameters def test_parameters_down(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: "compute-1", self.action.STATE: element.ServiceState.DISABLED.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_up(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: "compute-1", self.action.STATE: element.ServiceState.ENABLED.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_wrong_state(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: "compute-1", self.action.STATE: 'error'} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_resource_id_empty(self): self.action.input_parameters = { self.action.STATE: element.ServiceState.ENABLED.value, } self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_applies_add_extra(self): self.action.input_parameters = {"extra": "failed"} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_change_service_state_pre_condition(self): try: self.action.pre_condition() except Exception as exc: self.fail(exc) def test_change_service_state_post_condition(self): try: self.action.post_condition() except Exception as exc: 
self.fail(exc) def test_execute_change_service_state_with_enable_target(self): self.action.execute() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.enable_service_nova_compute.assert_called_once_with( "compute-1") def test_execute_change_service_state_with_disable_target(self): self.action.input_parameters["state"] = ( element.ServiceState.DISABLED.value) self.action.input_parameters["disabled_reason"] = ( "watcher_disabled") self.action.execute() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.disable_service_nova_compute.assert_called_once_with( "compute-1", "watcher_disabled") def test_revert_change_service_state_with_enable_target(self): self.action.input_parameters["disabled_reason"] = ( "watcher_disabled") self.action.revert() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.disable_service_nova_compute.assert_called_once_with( "compute-1", "watcher_disabled") def test_revert_change_service_state_with_disable_target(self): self.action.input_parameters["state"] = ( element.ServiceState.DISABLED.value) self.action.revert() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.enable_service_nova_compute.assert_called_once_with( "compute-1") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/test_migration.py0000664000175000017500000002071600000000000026160 0ustar00zuulzuul00000000000000# Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
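"""Tests for the Migrate applier action.

A migration is parametrised by the instance UUID plus a migration type
(``live`` or ``cold``) and the source/destination hostnames; revert simply
swaps the direction. The input dictionary validated by these tests looks
like (``resource_id`` being the BaseAction.RESOURCE_ID key)::

    {'resource_id': '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba',
     'migration_type': 'live',
     'source_node': 'compute1-hostname',
     'destination_node': 'compute2-hostname'}
"""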
from unittest import mock import jsonschema from watcher.applier.actions import base as baction from watcher.applier.actions import migration from watcher.common import clients from watcher.common import exception from watcher.common import nova_helper from watcher.tests import base class TestMigration(base.TestCase): INSTANCE_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba" def setUp(self): super(TestMigration, self).setUp() self.m_osc_cls = mock.Mock() self.m_helper_cls = mock.Mock() self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_helper_cls.return_value = self.m_helper self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_helper_cls) m_openstack_clients.start() m_nova_helper.start() self.addCleanup(m_openstack_clients.stop) self.addCleanup(m_nova_helper.stop) self.input_parameters = { "migration_type": "live", "source_node": "compute1-hostname", "destination_node": "compute2-hostname", baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, } self.action = migration.Migrate(mock.Mock()) self.action.input_parameters = self.input_parameters self.input_parameters_cold = { "migration_type": "cold", "source_node": "compute1-hostname", "destination_node": "compute2-hostname", baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, } self.action_cold = migration.Migrate(mock.Mock()) self.action_cold.input_parameters = self.input_parameters_cold def test_parameters(self): params = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.MIGRATION_TYPE: 'live', self.action.DESTINATION_NODE: 'compute-2', self.action.SOURCE_NODE: 'compute-3'} self.action.input_parameters = params self.assertTrue(self.action.validate_parameters()) def test_parameters_cold(self): params = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.MIGRATION_TYPE: 'cold', self.action.DESTINATION_NODE: 'compute-2', self.action.SOURCE_NODE: 'compute-3'} self.action_cold.input_parameters = params self.assertTrue(self.action_cold.validate_parameters()) def test_parameters_exception_empty_fields(self): parameters = {baction.BaseAction.RESOURCE_ID: None, 'migration_type': None, 'source_node': None, 'destination_node': None} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_migration_type(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 'migration_type': 'unknown', 'source_node': 'compute-2', 'destination_node': 'compute-3'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_source_node(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 'migration_type': 'live', 'source_node': None, 'destination_node': 'compute-3'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_destination_node_none(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 'migration_type': 'live', 'source_node': 'compute-1', 'destination_node': None} self.action.input_parameters = parameters self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_resource_id(self): parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", 'migration_type': 'live', 'source_node': 'compute-2',
'destination_node': 'compute-3'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_migration_pre_condition(self): try: self.action.pre_condition() except Exception as exc: self.fail(exc) def test_migration_post_condition(self): try: self.action.post_condition() except Exception as exc: self.fail(exc) def test_execute_live_migration_invalid_instance(self): self.m_helper.find_instance.return_value = None exc = self.assertRaises( exception.InstanceNotFound, self.action.execute) self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) def test_execute_cold_migration_invalid_instance(self): self.m_helper.find_instance.return_value = None exc = self.assertRaises( exception.InstanceNotFound, self.action_cold.execute) self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) def test_execute_live_migration(self): self.m_helper.find_instance.return_value = self.INSTANCE_UUID try: self.action.execute() except Exception as exc: self.fail(exc) self.m_helper.live_migrate_instance.assert_called_once_with( instance_id=self.INSTANCE_UUID, dest_hostname="compute2-hostname") def test_execute_cold_migration(self): self.m_helper.find_instance.return_value = self.INSTANCE_UUID try: self.action_cold.execute() except Exception as exc: self.fail(exc) self.m_helper.watcher_non_live_migrate_instance.\ assert_called_once_with( instance_id=self.INSTANCE_UUID, dest_hostname="compute2-hostname" ) def test_revert_live_migration(self): self.m_helper.find_instance.return_value = self.INSTANCE_UUID self.action.revert() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.live_migrate_instance.assert_called_once_with( instance_id=self.INSTANCE_UUID, dest_hostname="compute1-hostname" ) def test_revert_cold_migration(self): self.m_helper.find_instance.return_value = self.INSTANCE_UUID self.action_cold.revert() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.watcher_non_live_migrate_instance.\ assert_called_once_with( instance_id=self.INSTANCE_UUID, dest_hostname="compute1-hostname" ) def test_abort_live_migrate(self): migration = mock.MagicMock() migration.id = "2" migrations = [migration] self.m_helper.get_running_migration.return_value = migrations self.m_helper.find_instance.return_value = self.INSTANCE_UUID try: self.action.abort() except Exception as exc: self.fail(exc) self.m_helper.abort_live_migrate.assert_called_once_with( instance_id=self.INSTANCE_UUID, source="compute1-hostname", destination="compute2-hostname") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/test_resize.py0000664000175000017500000000661600000000000025473 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
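# Editor's note: the validate_parameters() tests in these modules exercise
# jsonschema-based input checking. The schema below is an illustrative
# approximation of what a Watcher action schema looks like -- the real ones
# live next to each action class -- so treat the names and the UUID pattern
# here as assumptions, not a copy of the actual Migrate schema.
import jsonschema

EXAMPLE_SCHEMA = {
    "type": "object",
    "properties": {
        "resource_id": {"type": "string",
                        "pattern": "^[0-9a-fA-F-]{36}$"},
        "migration_type": {"type": "string", "enum": ["live", "cold"]},
        "source_node": {"type": "string", "minLength": 1},
        # A null destination lets the scheduler pick a target node.
        "destination_node": {"type": ["string", "null"]},
    },
    "required": ["resource_id", "migration_type", "source_node"],
}

good = {"resource_id": "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba",
        "migration_type": "live",
        "source_node": "compute1-hostname",
        "destination_node": None}
jsonschema.validate(good, EXAMPLE_SCHEMA)  # passes, no exception

try:
    # Mirrors test_parameters_exception_resource_id: "EFEF" is not a UUID.
    jsonschema.validate(dict(good, resource_id="EFEF"), EXAMPLE_SCHEMA)
except jsonschema.ValidationError as exc:
    print("rejected:", exc.message)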
import jsonschema from unittest import mock from watcher.applier.actions import base as baction from watcher.applier.actions import resize from watcher.common import clients from watcher.common import nova_helper from watcher.tests import base class TestResize(base.TestCase): INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" def setUp(self): super(TestResize, self).setUp() self.r_osc_cls = mock.Mock() self.r_helper_cls = mock.Mock() self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) self.r_helper_cls.return_value = self.r_helper self.r_osc = mock.Mock(spec=clients.OpenStackClients) self.r_osc_cls.return_value = self.r_osc r_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.r_osc_cls) r_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.r_helper_cls) r_openstack_clients.start() r_nova_helper.start() self.addCleanup(r_openstack_clients.stop) self.addCleanup(r_nova_helper.stop) self.input_parameters = { "flavor": "x1", baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, } self.action = resize.Resize(mock.Mock()) self.action.input_parameters = self.input_parameters def test_parameters(self): params = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.FLAVOR: 'x1'} self.action.input_parameters = params self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_empty_fields(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.FLAVOR: None} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_flavor(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.FLAVOR: None} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_resource_id(self): parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", self.action.FLAVOR: 'x1'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_execute_resize(self): self.r_helper.find_instance.return_value = self.INSTANCE_UUID self.action.execute() self.r_helper.resize_instance.assert_called_once_with( instance_id=self.INSTANCE_UUID, flavor='x1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/test_sleep.py0000664000175000017500000000314500000000000025274 0ustar00zuulzuul00000000000000# Copyright (c) 2016 b<>com # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
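# Editor's note: several tests in this tree (e.g. TestMigration's
# invalid-instance cases) use ``exc = self.assertRaises(...)`` and then
# inspect exc.kwargs. That works because these TestCases ultimately derive,
# via oslotest, from testtools, whose assertRaises *returns* the raised
# exception; stdlib unittest.TestCase.assertRaises returns None in the
# callable form. Minimal sketch; InstanceNotFound is a hypothetical
# stand-in for the Watcher exception type.
import testtools


class InstanceNotFound(Exception):
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        super(InstanceNotFound, self).__init__(
            "instance %(name)s could not be found" % kwargs)


def find_instance(uuid):
    raise InstanceNotFound(name=uuid)


class TestAssertRaisesIdiom(testtools.TestCase):
    def test_exception_is_returned(self):
        exc = self.assertRaises(InstanceNotFound, find_instance, "abc")
        self.assertEqual("abc", exc.kwargs["name"])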
# import jsonschema from unittest import mock from watcher.applier.actions import sleep from watcher.tests import base class TestSleep(base.TestCase): def setUp(self): super(TestSleep, self).setUp() self.s = sleep.Sleep(mock.Mock()) def test_parameters_duration(self): self.s.input_parameters = {self.s.DURATION: 1.0} self.assertTrue(self.s.validate_parameters()) def test_parameters_duration_empty(self): self.s.input_parameters = {self.s.DURATION: None} self.assertRaises(jsonschema.ValidationError, self.s.validate_parameters) def test_parameters_wrong_parameter(self): self.s.input_parameters = {self.s.DURATION: "ef"} self.assertRaises(jsonschema.ValidationError, self.s.validate_parameters) def test_parameters_add_field(self): self.s.input_parameters = {self.s.DURATION: 1.0, "not_required": "nop"} self.assertRaises(jsonschema.ValidationError, self.s.validate_parameters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/actions/test_volume_migration.py0000664000175000017500000002141500000000000027544 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import jsonschema from watcher.applier.actions import base as baction from watcher.applier.actions import volume_migration from watcher.common import cinder_helper from watcher.common import clients from watcher.common import keystone_helper from watcher.common import nova_helper from watcher.common import utils as w_utils from watcher.tests import base class TestMigration(base.TestCase): VOLUME_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba" INSTANCE_UUID = "45a37aec-85ab-4dda-a303-7d9f62c2f5bb" def setUp(self): super(TestMigration, self).setUp() self.m_osc_cls = mock.Mock() self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc self.m_n_helper_cls = mock.Mock() self.m_n_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_n_helper_cls.return_value = self.m_n_helper self.m_c_helper_cls = mock.Mock() self.m_c_helper = mock.Mock(spec=cinder_helper.CinderHelper) self.m_c_helper_cls.return_value = self.m_c_helper self.m_k_helper_cls = mock.Mock() self.m_k_helper = mock.Mock(spec=keystone_helper.KeystoneHelper) self.m_k_helper_cls.return_value = self.m_k_helper m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_n_helper_cls) m_cinder_helper = mock.patch.object( cinder_helper, "CinderHelper", self.m_c_helper_cls) m_keystone_helper = mock.patch.object( keystone_helper, "KeystoneHelper", self.m_k_helper_cls) m_openstack_clients.start() m_nova_helper.start() m_cinder_helper.start() m_keystone_helper.start() self.addCleanup(m_keystone_helper.stop) self.addCleanup(m_cinder_helper.stop) self.addCleanup(m_nova_helper.stop) self.addCleanup(m_openstack_clients.stop) self.action = volume_migration.VolumeMigrate(mock.Mock()) self.input_parameters_swap = { "migration_type": "swap", 
"destination_node": "storage1-poolname", "destination_type": "storage1-typename", baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, } self.action_swap = volume_migration.VolumeMigrate(mock.Mock()) self.action_swap.input_parameters = self.input_parameters_swap self.input_parameters_migrate = { "migration_type": "migrate", "destination_node": "storage1-poolname", "destination_type": "", baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, } self.action_migrate = volume_migration.VolumeMigrate(mock.Mock()) self.action_migrate.input_parameters = self.input_parameters_migrate self.input_parameters_retype = { "migration_type": "retype", "destination_node": "", "destination_type": "storage1-typename", baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, } self.action_retype = volume_migration.VolumeMigrate(mock.Mock()) self.action_retype.input_parameters = self.input_parameters_retype @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock() volume.id = kwargs.get('id', TestMigration.VOLUME_UUID) volume.size = kwargs.get('size', '1') volume.status = kwargs.get('status', 'available') volume.snapshot_id = kwargs.get('snapshot_id', None) volume.availability_zone = kwargs.get('availability_zone', 'nova') return volume @staticmethod def fake_instance(**kwargs): instance = mock.MagicMock() instance.id = kwargs.get('id', TestMigration.INSTANCE_UUID) instance.status = kwargs.get('status', 'ACTIVE') return instance def test_parameters_swap(self): params = {baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, self.action.MIGRATION_TYPE: 'swap', self.action.DESTINATION_NODE: None, self.action.DESTINATION_TYPE: 'type-1'} self.action_swap.input_parameters = params self.assertTrue(self.action_swap.validate_parameters) def test_parameters_migrate(self): params = {baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, self.action.MIGRATION_TYPE: 'migrate', self.action.DESTINATION_NODE: 'node-1', self.action.DESTINATION_TYPE: None} self.action_migrate.input_parameters = params self.assertTrue(self.action_migrate.validate_parameters) def test_parameters_retype(self): params = {baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, self.action.MIGRATION_TYPE: 'retype', self.action.DESTINATION_NODE: None, self.action.DESTINATION_TYPE: 'type-1'} self.action_retype.input_parameters = params self.assertTrue(self.action_retype.validate_parameters) def test_parameters_exception_resource_id(self): params = {baction.BaseAction.RESOURCE_ID: "EFEF", self.action.MIGRATION_TYPE: 'swap', self.action.DESTINATION_NODE: None, self.action.DESTINATION_TYPE: 'type-1'} self.action_swap.input_parameters = params self.assertRaises(jsonschema.ValidationError, self.action_swap.validate_parameters) def test_migrate_success(self): volume = self.fake_volume() self.m_c_helper.get_volume.return_value = volume result = self.action_migrate.execute() self.assertTrue(result) self.m_c_helper.migrate.assert_called_once_with( volume, "storage1-poolname" ) def test_retype_success(self): volume = self.fake_volume() self.m_c_helper.get_volume.return_value = volume result = self.action_retype.execute() self.assertTrue(result) self.m_c_helper.retype.assert_called_once_with( volume, "storage1-typename", ) def test_swap_success(self): volume = self.fake_volume( status='in-use', attachments=[{'server_id': 'server_id'}]) self.m_n_helper.find_instance.return_value = self.fake_instance() new_volume = self.fake_volume(id=w_utils.generate_uuid()) user = mock.Mock() session = mock.MagicMock() self.m_k_helper.create_user.return_value = user 
self.m_k_helper.create_session.return_value = session self.m_c_helper.get_volume.return_value = volume self.m_c_helper.create_volume.return_value = new_volume result = self.action_swap.execute() self.assertTrue(result) self.m_n_helper.swap_volume.assert_called_once_with( volume, new_volume ) self.m_k_helper.delete_user.assert_called_once_with(user) def test_swap_fail(self): # _can_swap fail instance = self.fake_instance(status='STOPPED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap.execute() self.assertFalse(result) def test_can_swap_success(self): volume = self.fake_volume( status='in-use', attachments=[{'server_id': 'server_id'}]) instance = self.fake_instance() self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertTrue(result) instance = self.fake_instance(status='PAUSED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertTrue(result) instance = self.fake_instance(status='RESIZED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertTrue(result) def test_can_swap_fail(self): volume = self.fake_volume( status='in-use', attachments=[{'server_id': 'server_id'}]) instance = self.fake_instance(status='STOPPED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertFalse(result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/messaging/0000775000175000017500000000000000000000000023065 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/messaging/__init__.py0000664000175000017500000000000000000000000025164 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py0000664000175000017500000000250700000000000032414 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
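# Editor's note: the volume-swap tests above pin down a state predicate --
# swap is only attempted for an in-use, attached volume whose owning
# instance is in a state Nova can update. The state set below mirrors those
# tests (ACTIVE/PAUSED/RESIZED pass, STOPPED fails) and is illustrative,
# not a copy of the production _can_swap check.
SWAPPABLE_INSTANCE_STATES = frozenset({"ACTIVE", "PAUSED", "RESIZED"})


def can_swap(instance_status, volume_status, attachments):
    if volume_status != "in-use" or not attachments:
        return False
    return instance_status in SWAPPABLE_INSTANCE_STATES


assert can_swap("ACTIVE", "in-use", [{"server_id": "s1"}])
assert can_swap("PAUSED", "in-use", [{"server_id": "s1"}])
assert not can_swap("STOPPED", "in-use", [{"server_id": "s1"}])
assert not can_swap("ACTIVE", "available", [])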
# from unittest import mock from watcher.applier.messaging import trigger from watcher.common import utils from watcher.tests import base class TestTriggerActionPlan(base.TestCase): def __init__(self, *args, **kwds): super(TestTriggerActionPlan, self).__init__(*args, **kwds) self.applier = mock.MagicMock() self.endpoint = trigger.TriggerActionPlan(self.applier) def test_launch_action_plan(self): action_plan_uuid = utils.generate_uuid() expected_uuid = self.endpoint.launch_action_plan(self.context, action_plan_uuid) self.assertEqual(expected_uuid, action_plan_uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/test_applier_manager.py0000664000175000017500000000276700000000000025663 0ustar00zuulzuul00000000000000# Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from unittest import mock import oslo_messaging as om from watcher.applier import manager as applier_manager from watcher.common import service from watcher.tests import base class TestApplierManager(base.TestCase): def setUp(self): super(TestApplierManager, self).setUp() p_heartbeat = mock.patch.object( service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) self.applier = service.Service(applier_manager.ApplierManager) @mock.patch.object(om.rpc.server.RPCServer, "stop") @mock.patch.object(om.rpc.server.RPCServer, "start") def test_start(self, m_messaging_start, m_messaging_stop): self.applier.start() self.applier.stop() self.assertEqual(1, m_messaging_start.call_count) self.assertEqual(1, m_messaging_stop.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/test_rpcapi.py0000664000175000017500000000366000000000000024004 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
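# Editor's note: TestApplierManager above stacks two @mock.patch.object
# decorators. Decorators apply bottom-up, so the innermost patch becomes
# the first positional mock argument -- an ordering the test signature
# depends on. Self-contained sketch with plain classes (the names are
# hypothetical, not oslo.messaging classes):
import unittest
from unittest import mock


class Server(object):
    def start(self):
        pass

    def stop(self):
        pass


class Service(object):
    def __init__(self):
        self._server = Server()

    def start(self):
        self._server.start()

    def stop(self):
        self._server.stop()


class TestDecoratorOrder(unittest.TestCase):
    @mock.patch.object(Server, "stop")
    @mock.patch.object(Server, "start")
    def test_start_stop(self, m_start, m_stop):
        # m_start is the *bottom* decorator, m_stop the top one.
        svc = Service()
        svc.start()
        svc.stop()
        self.assertEqual(1, m_start.call_count)
        self.assertEqual(1, m_stop.call_count)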
# from unittest import mock import oslo_messaging as om from watcher.applier import rpcapi from watcher.common import exception from watcher.common import utils from watcher.tests import base class TestApplierAPI(base.TestCase): api = rpcapi.ApplierAPI() def test_get_api_version(self): with mock.patch.object(om.RPCClient, 'call') as mock_call: expected_context = self.context self.api.check_api_version(expected_context) mock_call.assert_called_once_with( expected_context, 'check_api_version', api_version=rpcapi.ApplierAPI().API_VERSION) def test_execute_action_plan_without_error(self): with mock.patch.object(om.RPCClient, 'cast') as mock_cast: action_plan_uuid = utils.generate_uuid() self.api.launch_action_plan(self.context, action_plan_uuid) mock_cast.assert_called_once_with( self.context, 'launch_action_plan', action_plan_uuid=action_plan_uuid) def test_execute_action_plan_throw_exception(self): action_plan_uuid = "uuid" self.assertRaises(exception.InvalidUuidOrName, self.api.launch_action_plan, action_plan_uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/test_sync.py0000664000175000017500000000651200000000000023501 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 SBCloud # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
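# Editor's note: the RPC-API tests above assert against 'call' for
# request/response methods and 'cast' for fire-and-forget ones; in
# oslo.messaging, call() blocks for a reply while cast() returns
# immediately. A sketch of why a test pins one or the other, using a bare
# Mock (the context object and uuid value here are made up):
from unittest import mock

client = mock.Mock()
ctx = object()

# Request/response style, like check_api_version -> call
client.call(ctx, "check_api_version", api_version="1.0")
client.call.assert_called_once_with(ctx, "check_api_version",
                                    api_version="1.0")

# One-way trigger style, like launch_action_plan -> cast
client.cast(ctx, "launch_action_plan", action_plan_uuid="fake-uuid")
client.cast.assert_called_once_with(ctx, "launch_action_plan",
                                    action_plan_uuid="fake-uuid")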
from unittest import mock from oslo_config import cfg from oslo_utils import uuidutils from watcher.applier import sync from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher.tests.db import base as db_base from watcher import notifications from watcher import objects from watcher.tests.objects import utils as obj_utils class TestCancelOngoingActionPlans(db_base.DbTestCase): def setUp(self): super(TestCancelOngoingActionPlans, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.strategy.id) self.audit = obj_utils.create_test_audit( self.context, id=999, name='My Audit 999', uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.ONESHOT.value, goal=self.goal, hostname='hostname1', state=objects.audit.State.ONGOING) self.actionplan = obj_utils.create_test_action_plan( self.context, state=objects.action_plan.State.ONGOING, audit_id=999, hostname='hostname1') self.action = obj_utils.create_test_action( self.context, action_plan_id=1, state=objects.action.State.PENDING) cfg.CONF.set_override("host", "hostname1") @mock.patch.object(objects.action.Action, 'save') @mock.patch.object(objects.action_plan.ActionPlan, 'save') @mock.patch.object(objects.action.Action, 'list') @mock.patch.object(objects.action_plan.ActionPlan, 'list') def test_cancel_ongoing_actionplans(self, m_plan_list, m_action_list, m_plan_save, m_action_save): m_plan_list.return_value = [self.actionplan] m_action_list.return_value = [self.action] syncer = sync.Syncer() syncer._cancel_ongoing_actionplans(self.context) m_plan_list.assert_called() m_action_list.assert_called() m_plan_save.assert_called() m_action_save.assert_called() self.assertEqual(self.action.state, objects.action.State.CANCELLED) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/0000775000175000017500000000000000000000000024307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/__init__.py0000664000175000017500000000000000000000000026406 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/loading/0000775000175000017500000000000000000000000025724 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/loading/__init__.py0000664000175000017500000000000000000000000030023 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py0000664000175000017500000000220700000000000033635 
0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.applier.loading import default from watcher.applier.workflow_engine import base as wbase from watcher.tests import base class TestDefaultActionLoader(base.TestCase): def setUp(self): super(TestDefaultActionLoader, self).setUp() self.loader = default.DefaultWorkFlowEngineLoader() def test_endpoints(self): for endpoint in self.loader.list_available(): loaded = self.loader.load(endpoint) self.assertIsNotNone(loaded) self.assertIsInstance(loaded, wbase.BaseWorkFlowEngine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py0000664000175000017500000004026700000000000032634 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
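# Editor's note: TestDefaultActionLoader above is a contract test -- every
# advertised endpoint must load to a non-None instance of the expected base
# class. Generic sketch of that shape, with a plain registry dict standing
# in for Watcher's stevedore-based loader:
import abc


class BaseEngine(abc.ABC):
    @abc.abstractmethod
    def execute(self, actions):
        raise NotImplementedError


class TaskFlowishEngine(BaseEngine):
    def execute(self, actions):
        return list(actions)


REGISTRY = {"taskflow": TaskFlowishEngine}


def list_available():
    return sorted(REGISTRY)


def load(name):
    return REGISTRY[name]()


# The contract check: everything listed must load and honour the base API.
for endpoint in list_available():
    loaded = load(endpoint)
    assert loaded is not None
    assert isinstance(loaded, BaseEngine)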
# import abc from unittest import mock from watcher.applier.actions import base as abase from watcher.applier.actions import factory from watcher.applier.workflow_engine import default as tflow from watcher.common import exception from watcher.common import utils from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class ExpectedException(Exception): pass class FakeAction(abase.BaseAction, metaclass=abc.ABCMeta): def schema(self): pass def post_condition(self): pass def pre_condition(self): pass def revert(self): pass def execute(self): return False def get_description(self): return "fake action, just for test" class TestDefaultWorkFlowEngine(base.DbTestCase): def setUp(self): super(TestDefaultWorkFlowEngine, self).setUp() self.engine = tflow.DefaultWorkFlowEngine( config=mock.Mock(), context=self.context, applier_manager=mock.MagicMock()) self.engine.config.max_workers = 2 @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch('taskflow.engines.load') @mock.patch('taskflow.patterns.graph_flow.Flow.link') def test_execute(self, graph_flow, engines, m_actionplan, m_strategy): actions = mock.MagicMock() try: self.engine.execute(actions) self.assertTrue(engines.called) except Exception as exc: self.fail(exc) def create_action(self, action_type, parameters, parents=None, uuid=None, state=None): action = { 'uuid': uuid or utils.generate_uuid(), 'action_plan_id': 0, 'action_type': action_type, 'input_parameters': parameters, 'state': objects.action.State.PENDING, 'parents': parents or [], } new_action = objects.Action(self.context, **action) with mock.patch.object(notifications.action, 'send_create'): new_action.create() return new_action def check_action_state(self, action, expected_state): to_check = objects.Action.get_by_uuid(self.context, action.uuid) self.assertEqual(expected_state, to_check.state) def check_actions_state(self, actions, expected_state): for a in actions: self.check_action_state(a, expected_state) @mock.patch('taskflow.engines.load') @mock.patch('taskflow.patterns.graph_flow.Flow.link') def test_execute_with_no_actions(self, graph_flow, engines): actions = [] try: self.engine.execute(actions) self.assertFalse(graph_flow.called) self.assertTrue(engines.called) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_one_action(self, mock_send_update, mock_execution_notification, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [self.create_action("nop", {'message': 'test'})] try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_nop_sleep(self, mock_send_update, mock_execution_notification, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) 
m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] first_nop = self.create_action("nop", {'message': 'test'}) second_nop = self.create_action("nop", {'message': 'second test'}) sleep = self.create_action("sleep", {'duration': 0.0}, parents=[first_nop.uuid, second_nop.uuid]) actions.extend([first_nop, second_nop, sleep]) try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_parents(self, mock_send_update, mock_execution_notification, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] first_nop = self.create_action( "nop", {'message': 'test'}, uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19') second_nop = self.create_action( "nop", {'message': 'second test'}, uuid='0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23') first_sleep = self.create_action( "sleep", {'duration': 0.0}, parents=[first_nop.uuid, second_nop.uuid], uuid='be436531-0da3-4dad-a9c0-ea1d2aff6496') second_sleep = self.create_action( "sleep", {'duration': 0.0}, parents=[first_sleep.uuid], uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c') actions.extend([first_nop, second_nop, first_sleep, second_sleep]) expected_nodes = [ {'uuid': 'bc7eee5c-4fbe-4def-9744-b539be55aa19', 'input_parameters': {u'message': u'test'}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], 'action_type': u'nop', 'id': 1}, {'uuid': '0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', 'input_parameters': {u'message': u'second test'}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], 'action_type': u'nop', 'id': 2}, {'uuid': 'be436531-0da3-4dad-a9c0-ea1d2aff6496', 'input_parameters': {u'duration': 0.0}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [u'bc7eee5c-4fbe-4def-9744-b539be55aa19', u'0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23'], 'action_type': u'sleep', 'id': 3}, {'uuid': '9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', 'input_parameters': {u'duration': 0.0}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [u'be436531-0da3-4dad-a9c0-ea1d2aff6496'], 'action_type': u'sleep', 'id': 4}] expected_edges = [ ('action_type:nop uuid:0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', 'action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), ('action_type:nop uuid:bc7eee5c-4fbe-4def-9744-b539be55aa19', 'action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), ('action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496', 'action_type:sleep uuid:9eb51e14-936d-4d12-a500-6ba0f5e0bb1c')] try: flow = self.engine.execute(actions) actual_nodes = sorted([x[0]._db_action.as_dict() for x in flow.iter_nodes()], key=lambda x: x['id']) for expected, actual in zip(expected_nodes, actual_nodes): for key in expected.keys(): self.assertIn(expected[key], actual.values()) actual_edges = [(u.name, v.name) for (u, v, _) in flow.iter_links()] for edge in expected_edges: self.assertIn(edge, actual_edges) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') 
@mock.patch.object(notifications.action, 'send_update') def test_execute_with_two_actions(self, m_send_update, m_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] second = self.create_action("sleep", {'duration': 0.0}) first = self.create_action("nop", {'message': 'test'}) actions.append(first) actions.append(second) try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_three_actions(self, m_send_update, m_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] third = self.create_action("nop", {'message': 'next'}) second = self.create_action("sleep", {'duration': 0.0}) first = self.create_action("nop", {'message': 'hello'}) self.check_action_state(first, objects.action.State.PENDING) self.check_action_state(second, objects.action.State.PENDING) self.check_action_state(third, objects.action.State.PENDING) actions.append(first) actions.append(second) actions.append(third) try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_exception(self, m_send_update, m_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] third = self.create_action("no_exist", {'message': 'next'}) second = self.create_action("sleep", {'duration': 0.0}) first = self.create_action("nop", {'message': 'hello'}) self.check_action_state(first, objects.action.State.PENDING) self.check_action_state(second, objects.action.State.PENDING) self.check_action_state(third, objects.action.State.PENDING) actions.append(first) actions.append(second) actions.append(third) self.engine.execute(actions) self.check_action_state(first, objects.action.State.SUCCEEDED) self.check_action_state(second, objects.action.State.SUCCEEDED) self.check_action_state(third, objects.action.State.FAILED) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') @mock.patch.object(factory.ActionFactory, "make_action") def test_execute_with_action_failed(self, m_make_action, m_send_update, m_send_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [self.create_action("fake_action", {})] m_make_action.return_value = FakeAction(mock.Mock()) self.engine.execute(actions) 
self.check_action_state(actions[0], objects.action.State.FAILED) @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_execute_with_action_plan_cancel(self, m_get_actionplan): obj_utils.create_test_goal(self.context) strategy = obj_utils.create_test_strategy(self.context) audit = obj_utils.create_test_audit( self.context, strategy_id=strategy.id) action_plan = obj_utils.create_test_action_plan( self.context, audit_id=audit.id, strategy_id=strategy.id, state=objects.action_plan.State.CANCELLING) action1 = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, action_type='nop', state=objects.action.State.SUCCEEDED, input_parameters={'message': 'hello World'}) action2 = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, action_type='nop', state=objects.action.State.ONGOING, uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', input_parameters={'message': 'hello World'}) action3 = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, action_type='nop', state=objects.action.State.PENDING, uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19', input_parameters={'message': 'hello World'}) m_get_actionplan.return_value = action_plan actions = [] actions.append(action1) actions.append(action2) actions.append(action3) self.assertRaises(exception.ActionPlanCancelled, self.engine.execute, actions) try: self.check_action_state(action1, objects.action.State.SUCCEEDED) self.check_action_state(action2, objects.action.State.CANCELLED) self.check_action_state(action3, objects.action.State.CANCELLED) except Exception as exc: self.fail(exc) def test_decider(self): # execution_rule is ALWAYS self.engine.execution_rule = 'ALWAYS' history = {'action1': True} self.assertTrue(self.engine.decider(history)) history = {'action1': False} self.assertTrue(self.engine.decider(history)) # execution_rule is ANY self.engine.execution_rule = 'ANY' history = {'action1': True} self.assertFalse(self.engine.decider(history)) history = {'action1': False} self.assertTrue(self.engine.decider(history)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py0000664000175000017500000001516600000000000033162 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
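# Editor's note: test_decider above fixes the semantics of the two
# execution rules as the test encodes them: ALWAYS runs a downstream action
# regardless of ancestor results, while ANY runs it only when at least one
# recorded ancestor did not succeed. Standalone sketch of that predicate
# (history maps a task name to its boolean result, as taskflow hands to a
# link decider):
def decider(execution_rule, history):
    if execution_rule == "ALWAYS":
        return True
    # "ANY": proceed only if some ancestor failed
    return not all(history.values())


assert decider("ALWAYS", {"action1": True})
assert decider("ALWAYS", {"action1": False})
assert not decider("ANY", {"action1": True})
assert decider("ANY", {"action1": False})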
# import eventlet from unittest import mock from oslo_config import cfg from watcher.applier.workflow_engine import default as tflow from watcher.common import clients from watcher.common import nova_helper from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class TestTaskFlowActionContainer(base.DbTestCase): def setUp(self): super(TestTaskFlowActionContainer, self).setUp() self.engine = tflow.DefaultWorkFlowEngine( config=mock.Mock(), context=self.context, applier_manager=mock.MagicMock()) obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit( self.context, strategy_id=self.strategy.id) def test_execute(self): action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.ONGOING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.ONGOING, action_type='nop', input_parameters={'message': 'hello World'}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) action_container.execute() obj_action = objects.Action.get_by_uuid( self.engine.context, action.uuid) self.assertEqual(obj_action.state, objects.action.State.SUCCEEDED) @mock.patch.object(clients.OpenStackClients, 'nova', mock.Mock()) def test_execute_with_failed(self): nova_util = nova_helper.NovaHelper() instance = "31b9dd5c-b1fd-4f61-9b68-a47096326dac" nova_util.nova.servers.get.return_value = instance action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.ONGOING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.ONGOING, action_type='migrate', input_parameters={"resource_id": instance, "migration_type": "live", "destination_node": "host2", "source_node": "host1"}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) result = action_container.execute() self.assertFalse(result) obj_action = objects.Action.get_by_uuid( self.engine.context, action.uuid) self.assertEqual(obj_action.state, objects.action.State.FAILED) @mock.patch('eventlet.spawn') def test_execute_with_cancel_action_plan(self, mock_eventlet_spawn): action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.CANCELLING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.ONGOING, action_type='nop', input_parameters={'message': 'hello World'}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) def empty_test(): pass et = eventlet.spawn(empty_test) mock_eventlet_spawn.return_value = et action_container.execute() et.kill.assert_called_with() @mock.patch('watcher.applier.workflow_engine.default.LOG') def test_execute_without_rollback(self, mock_log): action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.ONGOING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.FAILED, action_type='nop', input_parameters={'message': 'hello World'}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) 
cfg.CONF.set_override("rollback_when_actionplan_failed", False, group="watcher_applier") action_name = "action_type:{0} uuid:{1}".format(action.action_type, action.uuid) expected_log = ('Failed actionplan rollback option is turned off, ' 'and the following action will be skipped: %s') action_container.revert() mock_log.info.assert_called_once_with(expected_log, action_name) @mock.patch('watcher.applier.workflow_engine.default.LOG') def test_execute_with_rollback(self, mock_log): action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.ONGOING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.FAILED, action_type='nop', input_parameters={'message': 'hello World'}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) cfg.CONF.set_override("rollback_when_actionplan_failed", True, group="watcher_applier") action_name = "action_type:{0} uuid:{1}".format(action.action_type, action.uuid) expected_log = 'Revert action: %s' action_container.revert() mock_log.warning.assert_called_once_with(expected_log, action_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/base.py0000664000175000017500000001120400000000000020736 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
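# Editor's note: the rollback tests above toggle applier behaviour through
# oslo.config overrides. Minimal sketch of the register/override/clear
# lifecycle; the option and group names below are made up for illustration:
from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opt(cfg.BoolOpt("rollback_enabled", default=True),
                  group="demo_applier")

assert CONF.demo_applier.rollback_enabled is True

CONF.set_override("rollback_enabled", False, group="demo_applier")
assert CONF.demo_applier.rollback_enabled is False

# The test fixtures call CONF.reset() in cleanup; clear_override() is the
# targeted equivalent for a single option.
CONF.clear_override("rollback_enabled", group="demo_applier")
assert CONF.demo_applier.rollback_enabled is True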
import copy import os from unittest import mock from oslo_config import cfg from oslo_log import log from oslo_messaging import conffixture from oslotest import base import pecan from pecan import testing import testscenarios from watcher.common import context as watcher_context from watcher.common import service from watcher.objects import base as objects_base from watcher.tests import conf_fixture from watcher.tests import policy_fixture CONF = cfg.CONF try: log.register_options(CONF) except cfg.ArgsAlreadyParsedError: pass CONF.set_override('use_stderr', False) class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): """Test base class.""" def setUp(self): super(BaseTestCase, self).setUp() self.addCleanup(cfg.CONF.reset) class TestCase(BaseTestCase): """Test case base class for all unit tests.""" def setUp(self): super(TestCase, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) self.policy = self.useFixture(policy_fixture.PolicyFixture()) self.messaging_conf = self.useFixture(conffixture.ConfFixture(CONF)) self.messaging_conf.transport_url = 'fake:/' cfg.CONF.set_override("auth_type", "admin_token", group='keystone_authtoken') app_config_path = os.path.join(os.path.dirname(__file__), 'config.py') self.app = testing.load_test_app(app_config_path) self.token_info = { 'token': { 'project': { 'id': 'fake_project' }, 'user': { 'id': 'fake_user' } } } objects_base.WatcherObject.indirection_api = None self.context = watcher_context.RequestContext( auth_token_info=self.token_info, project_id='fake_project', user_id='fake_user') self.policy = self.useFixture(policy_fixture.PolicyFixture()) def make_context(*args, **kwargs): # If context hasn't been constructed with token_info if not kwargs.get('auth_token_info'): kwargs['auth_token_info'] = copy.deepcopy(self.token_info) if not kwargs.get('project_id'): kwargs['project_id'] = 'fake_project' if not kwargs.get('user_id'): kwargs['user_id'] = 'fake_user' context = watcher_context.RequestContext(*args, **kwargs) return watcher_context.RequestContext.from_dict(context.to_dict()) p = mock.patch.object(watcher_context, 'make_context', side_effect=make_context) self.mock_make_context = p.start() self.addCleanup(p.stop) self.useFixture(conf_fixture.ConfFixture(cfg.CONF)) self._reset_singletons() self._base_test_obj_backup = copy.copy( objects_base.WatcherObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) self.addCleanup(self._reset_singletons) def _reset_singletons(self): service.Singleton._instances.clear() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def _restore_obj_registry(self): objects_base.WatcherObjectRegistry._registry._obj_classes = ( self._base_test_obj_backup) def config(self, **kw): """Override config options for a test.""" group = kw.pop('group', None) for k, v in kw.items(): CONF.set_override(k, v, group) def get_path(self, project_file=None): """Get the absolute path to a file. Used for testing the API. :param project_file: File whose path to return. Default: None. :returns: path to the specified file, or path to project root. 
""" root = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) if project_file: return os.path.join(root, project_file) else: return root ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/cmd/0000775000175000017500000000000000000000000020217 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/cmd/__init__.py0000664000175000017500000000000000000000000022316 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/cmd/test_api.py0000664000175000017500000000422700000000000022406 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import types from unittest import mock from oslo_config import cfg from oslo_service import wsgi from pecan.testing import load_test_app from watcher.api import config as api_config from watcher.cmd import api from watcher.common import service from watcher.tests import base class TestApi(base.BaseTestCase): def setUp(self): super(TestApi, self).setUp() self.conf = cfg.CONF self._parse_cli_opts = self.conf._parse_cli_opts def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method def tearDown(self): super(TestApi, self).tearDown() self.conf._parse_cli_opts = self._parse_cli_opts @mock.patch.object(wsgi, "Server", mock.Mock()) @mock.patch("watcher.api.app.pecan.make_app") @mock.patch.object(service, "launch") def test_run_api_app(self, m_launcher, m_make_app): m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) api.main() self.assertEqual(1, m_launcher.call_count) @mock.patch.object(wsgi, "Server", mock.Mock()) @mock.patch("watcher.api.app.pecan.make_app") @mock.patch.object(service, "launch") def test_run_api_app_serve_specific_address(self, m_launcher, m_make_app): cfg.CONF.set_default("host", "localhost", group="api") m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) api.main() self.assertEqual(1, m_launcher.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/cmd/test_applier.py0000664000175000017500000000341300000000000023265 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import types from unittest import mock from oslo_config import cfg from oslo_service import service from watcher.applier import sync from watcher.cmd import applier from watcher.common import service as watcher_service from watcher.tests import base class TestApplier(base.BaseTestCase): def setUp(self): super(TestApplier, self).setUp() self.conf = cfg.CONF self._parse_cli_opts = self.conf._parse_cli_opts def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method p_heartbeat = mock.patch.object( watcher_service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) def tearDown(self): super(TestApplier, self).tearDown() self.conf._parse_cli_opts = self._parse_cli_opts @mock.patch.object(sync.Syncer, "sync", mock.Mock()) @mock.patch.object(service, "launch") def test_run_applier_app(self, m_launch): applier.main() self.assertEqual(1, m_launch.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/cmd/test_db_manage.py0000664000175000017500000001646500000000000023541 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
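# Editor's note: the cmd tests above (TestApi, TestApplier) neutralise CLI
# parsing by rebinding ConfigOpts._parse_cli_opts with types.MethodType, so
# main() never sees the test runner's argv. Illustration of the rebinding
# technique on a plain object (``Config`` is a hypothetical stand-in):
import types


class Config(object):
    def parse(self, args):
        return list(args)


conf = Config()
original = conf.parse


def _fake_parse(self, args=None):
    # Ignore whatever argv the caller passed, exactly as the tests do.
    return original([])


conf.parse = types.MethodType(_fake_parse, conf)
assert conf.parse(["--debug"]) == []   # the real argv is discarded
conf.parse = original                  # restored in tearDown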
import sys from unittest import mock from oslo_config import cfg from watcher.cmd import dbmanage from watcher.db import migration from watcher.db import purge from watcher.tests import base class TestDBManageRunApp(base.TestCase): scenarios = ( ("upgrade", {"command": "upgrade", "expected": "upgrade"}), ("downgrade", {"command": "downgrade", "expected": "downgrade"}), ("revision", {"command": "revision", "expected": "revision"}), ("stamp", {"command": "stamp", "expected": "stamp"}), ("version", {"command": "version", "expected": "version"}), ("create_schema", {"command": "create_schema", "expected": "create_schema"}), ("purge", {"command": "purge", "expected": "purge"}), ("no_param", {"command": None, "expected": "upgrade"}), ) @mock.patch.object(dbmanage, "register_sub_command_opts", mock.Mock()) @mock.patch("watcher.cmd.dbmanage.service.prepare_service") @mock.patch("watcher.cmd.dbmanage.sys") def test_run_db_manage_app(self, m_sys, m_prepare_service): # Patch command function m_func = mock.Mock() cfg.CONF.register_opt(cfg.SubCommandOpt("command")) cfg.CONF.command.func = m_func # Only append if the command is not None m_sys.argv = list(filter(None, ["watcher-db-manage", self.command])) dbmanage.main() self.assertEqual(1, m_func.call_count) m_prepare_service.assert_called_once_with( ["watcher-db-manage", self.expected], cfg.CONF) class TestDBManageRunCommand(base.TestCase): @mock.patch.object(migration, "upgrade") def test_run_db_upgrade(self, m_upgrade): cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") cfg.CONF.set_default("revision", "dummy", group="command") dbmanage.DBCommand.upgrade() m_upgrade.assert_called_once_with("dummy") @mock.patch.object(migration, "downgrade") def test_run_db_downgrade(self, m_downgrade): cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") cfg.CONF.set_default("revision", "dummy", group="command") dbmanage.DBCommand.downgrade() m_downgrade.assert_called_once_with("dummy") @mock.patch.object(migration, "revision") def test_run_db_revision(self, m_revision): cfg.CONF.register_opt(cfg.StrOpt("message"), group="command") cfg.CONF.register_opt(cfg.StrOpt("autogenerate"), group="command") cfg.CONF.set_default( "message", "dummy_message", group="command" ) cfg.CONF.set_default( "autogenerate", "dummy_autogenerate", group="command" ) dbmanage.DBCommand.revision() m_revision.assert_called_once_with( "dummy_message", "dummy_autogenerate" ) @mock.patch.object(migration, "stamp") def test_run_db_stamp(self, m_stamp): cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") cfg.CONF.set_default("revision", "dummy", group="command") dbmanage.DBCommand.stamp() @mock.patch.object(migration, "version") def test_run_db_version(self, m_version): dbmanage.DBCommand.version() self.assertEqual(1, m_version.call_count) @mock.patch.object(purge, "PurgeCommand") def test_run_db_purge(self, m_purge_cls): m_purge = mock.Mock() m_purge_cls.return_value = m_purge m_purge_cls.get_goal_uuid.return_value = 'Some UUID' cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") cfg.CONF.set_default("age_in_days", None, group="command") cfg.CONF.set_default("max_number", None, group="command") cfg.CONF.set_default("goal", None, group="command") cfg.CONF.set_default("exclude_orphans", True, 
group="command") cfg.CONF.set_default("dry_run", False, group="command") dbmanage.DBCommand.purge() m_purge_cls.assert_called_once_with( None, None, 'Some UUID', True, False) m_purge.execute.assert_called_once_with() @mock.patch.object(sys, "exit") @mock.patch.object(purge, "PurgeCommand") def test_run_db_purge_negative_max_number(self, m_purge_cls, m_exit): m_purge = mock.Mock() m_purge_cls.return_value = m_purge m_purge_cls.get_goal_uuid.return_value = 'Some UUID' cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") cfg.CONF.set_default("age_in_days", None, group="command") cfg.CONF.set_default("max_number", -1, group="command") cfg.CONF.set_default("goal", None, group="command") cfg.CONF.set_default("exclude_orphans", True, group="command") cfg.CONF.set_default("dry_run", False, group="command") dbmanage.DBCommand.purge() self.assertEqual(0, m_purge_cls.call_count) self.assertEqual(0, m_purge.execute.call_count) self.assertEqual(0, m_purge.do_delete.call_count) self.assertEqual(1, m_exit.call_count) @mock.patch.object(sys, "exit") @mock.patch.object(purge, "PurgeCommand") def test_run_db_purge_dry_run(self, m_purge_cls, m_exit): m_purge = mock.Mock() m_purge_cls.return_value = m_purge m_purge_cls.get_goal_uuid.return_value = 'Some UUID' cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") cfg.CONF.set_default("age_in_days", None, group="command") cfg.CONF.set_default("max_number", None, group="command") cfg.CONF.set_default("goal", None, group="command") cfg.CONF.set_default("exclude_orphans", True, group="command") cfg.CONF.set_default("dry_run", True, group="command") dbmanage.DBCommand.purge() m_purge_cls.assert_called_once_with( None, None, 'Some UUID', True, True) self.assertEqual(1, m_purge.execute.call_count) self.assertEqual(0, m_purge.do_delete.call_count) self.assertEqual(0, m_exit.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/cmd/test_decision_engine.py0000664000175000017500000000410200000000000024747 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
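# Illustrative sketch: the ``scenarios`` attribute used by
# TestDBManageRunApp above follows the testscenarios convention, where
# each (name, attributes) pair is expanded into its own test with the
# attributes set on the instance. Assuming the base test case mixes in
# testscenarios, a minimal standalone equivalent (hypothetical
# _TestAdd class) looks like this:

import testscenarios


class _TestAdd(testscenarios.TestWithScenarios):
    scenarios = [
        ("small", {"a": 1, "b": 2, "expected": 3}),
        ("zeros", {"a": 0, "b": 0, "expected": 0}),
    ]

    def test_add(self):
        # Runs once per scenario, with a/b/expected injected.
        self.assertEqual(self.expected, self.a + self.b)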
import types from unittest import mock from oslo_config import cfg from oslo_service import service from watcher.cmd import decisionengine from watcher.common import service as watcher_service from watcher.decision_engine.audit import continuous from watcher.decision_engine import sync from watcher.tests import base class TestDecisionEngine(base.BaseTestCase): def setUp(self): super(TestDecisionEngine, self).setUp() self.conf = cfg.CONF self._parse_cli_opts = self.conf._parse_cli_opts def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method p_heartbeat = mock.patch.object( watcher_service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) p_continuoushandler = mock.patch.object( continuous.ContinuousAuditHandler, "start") self.m_continuoushandler = p_continuoushandler.start() self.addCleanup(p_continuoushandler.stop) def tearDown(self): super(TestDecisionEngine, self).tearDown() self.conf._parse_cli_opts = self._parse_cli_opts @mock.patch.object(sync.Syncer, "sync", mock.Mock()) @mock.patch.object(service, "launch") def test_run_de_app(self, m_launch): decisionengine.main() self.assertEqual(1, m_launch.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/cmd/test_status.py0000664000175000017500000000306200000000000023154 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_upgradecheck.upgradecheck import Code from watcher.cmd import status from watcher import conf from watcher.tests import base CONF = conf.CONF class TestUpgradeChecks(base.TestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() self.cmd = status.Checks() def test_minimum_nova_api_version_ok(self): # Tests that the default [nova_client]/api_version meets the minimum # required version. result = self.cmd._minimum_nova_api_version() self.assertEqual(Code.SUCCESS, result.code) def test_minimum_nova_api_version_fail(self): # Tests the scenario that [nova_client]/api_version is less than the # minimum required version. 
CONF.set_override('api_version', '2.47', group='nova_client') result = self.cmd._minimum_nova_api_version() self.assertEqual(Code.FAILURE, result.code) self.assertIn('Invalid nova_client.api_version 2.47.', result.details) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/common/0000775000175000017500000000000000000000000020744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/__init__.py0000664000175000017500000000000000000000000023043 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6431353 python_watcher-14.0.0/watcher/tests/common/loader/0000775000175000017500000000000000000000000022212 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/loader/__init__.py0000664000175000017500000000000000000000000024311 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/loader/test_loader.py0000664000175000017500000000703400000000000025075 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
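# Illustrative sketch: the status-check test above flips an option
# with CONF.set_override. On a fresh oslo.config object (hypothetical
# ``api_version`` option), the override/clear_override pair works
# like this:

from oslo_config import cfg as _cfg

_conf = _cfg.ConfigOpts()
_conf.register_opt(_cfg.StrOpt("api_version", default="2.56"),
                   group="nova_client")
_conf([])  # initialise without reading any CLI arguments
_conf.set_override("api_version", "2.60", group="nova_client")
assert _conf.nova_client.api_version == "2.60"
_conf.clear_override("api_version", group="nova_client")
assert _conf.nova_client.api_version == "2.56"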
from unittest import mock from oslo_config import cfg from stevedore import driver as drivermanager from stevedore import extension as stevedore_extension from watcher.common import exception from watcher.common.loader import default from watcher.common.loader import loadable from watcher.tests import base class FakeLoadable(loadable.Loadable): @classmethod def get_config_opts(cls): return [] class FakeLoadableWithOpts(loadable.Loadable): @classmethod def get_config_opts(cls): return [ cfg.StrOpt("test_opt", default="fake_with_opts"), ] class TestLoader(base.TestCase): def setUp(self): super(TestLoader, self).setUp() def _fake_parse(self, *args, **kw): return cfg.ConfigOpts._parse_cli_opts(cfg.CONF, []) cfg.CONF._parse_cli_opts = _fake_parse def test_load_loadable_no_opt(self): fake_driver = drivermanager.DriverManager.make_test_instance( extension=stevedore_extension.Extension( name="fake", entry_point="%s:%s" % (FakeLoadable.__module__, FakeLoadable.__name__), plugin=FakeLoadable, obj=None), namespace="TESTING") loader_manager = default.DefaultLoader(namespace='TESTING') with mock.patch.object(drivermanager, "DriverManager") as m_driver_manager: m_driver_manager.return_value = fake_driver loaded_driver = loader_manager.load(name='fake') self.assertIsInstance(loaded_driver, FakeLoadable) @mock.patch("watcher.common.loader.default.drivermanager.DriverManager") def test_load_loadable_bad_plugin(self, m_driver_manager): m_driver_manager.side_effect = Exception() loader_manager = default.DefaultLoader(namespace='TESTING') self.assertRaises(exception.LoadingError, loader_manager.load, name='bad_driver') def test_load_loadable_with_opts(self): fake_driver = drivermanager.DriverManager.make_test_instance( extension=stevedore_extension.Extension( name="fake", entry_point="%s:%s" % (FakeLoadableWithOpts.__module__, FakeLoadableWithOpts.__name__), plugin=FakeLoadableWithOpts, obj=None), namespace="TESTING") loader_manager = default.DefaultLoader(namespace='TESTING') with mock.patch.object(drivermanager, "DriverManager") as m_driver_manager: m_driver_manager.return_value = fake_driver loaded_driver = loader_manager.load(name='fake') self.assertIsInstance(loaded_driver, FakeLoadableWithOpts) self.assertEqual( "fake_with_opts", loaded_driver.config.get("test_opt")) self.assertEqual( "fake_with_opts", loaded_driver.config.test_opt) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/common/metal_helper/0000775000175000017500000000000000000000000023405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/metal_helper/__init__.py0000664000175000017500000000000000000000000025504 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/metal_helper/test_base.py0000664000175000017500000000557000000000000025737 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from watcher.common import exception from watcher.common.metal_helper import base as m_helper_base from watcher.common.metal_helper import constants as m_constants from watcher.tests import base # The base classes have abstract methods, we'll need to # stub them. class MockMetalNode(m_helper_base.BaseMetalNode): def get_power_state(self): raise NotImplementedError() def get_id(self): raise NotImplementedError() def power_on(self): raise NotImplementedError() def power_off(self): raise NotImplementedError() class MockMetalHelper(m_helper_base.BaseMetalHelper): def list_compute_nodes(self): pass def get_node(self, node_id): pass class TestBaseMetalNode(base.TestCase): def setUp(self): super().setUp() self._nova_node = mock.Mock() self._node = MockMetalNode(self._nova_node) def test_get_hypervisor_node(self): self.assertEqual( self._nova_node, self._node.get_hypervisor_node()) def test_get_hypervisor_node_missing(self): node = MockMetalNode() self.assertRaises( exception.Invalid, node.get_hypervisor_node) def test_get_hypervisor_hostname(self): self.assertEqual( self._nova_node.hypervisor_hostname, self._node.get_hypervisor_hostname()) @mock.patch.object(MockMetalNode, 'power_on') @mock.patch.object(MockMetalNode, 'power_off') def test_set_power_state(self, mock_power_off, mock_power_on): self._node.set_power_state(m_constants.PowerState.ON) mock_power_on.assert_called_once_with() self._node.set_power_state(m_constants.PowerState.OFF) mock_power_off.assert_called_once_with() self.assertRaises( exception.UnsupportedActionType, self._node.set_power_state, m_constants.PowerState.UNKNOWN) class TestBaseMetalHelper(base.TestCase): def setUp(self): super().setUp() self._osc = mock.Mock() self._helper = MockMetalHelper(self._osc) def test_nova_client_attr(self): self.assertEqual(self._osc.nova.return_value, self._helper.nova_client) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/metal_helper/test_factory.py0000664000175000017500000000254100000000000026467 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
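# Illustrative sketch: TestBaseMetalNode above exercises concrete
# logic on an abstract base by subclassing it and stubbing every
# abstract method. The pattern, reduced to a hypothetical _Base class:

import abc


class _Base(abc.ABC):
    @abc.abstractmethod
    def power_on(self):
        """Turn the node on."""

    def ensure_on(self):
        # Concrete behaviour under test, shared by all subclasses.
        self.power_on()
        return True


class _StubNode(_Base):
    def power_on(self):
        pass  # stubbed: only the base-class logic matters here


assert _StubNode().ensure_on()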
from unittest import mock from watcher.common import clients from watcher.common.metal_helper import factory from watcher.common.metal_helper import ironic from watcher.common.metal_helper import maas from watcher.tests import base class TestMetalHelperFactory(base.TestCase): @mock.patch.object(clients, 'OpenStackClients') @mock.patch.object(maas, 'MaasHelper') @mock.patch.object(ironic, 'IronicHelper') def test_factory(self, mock_ironic, mock_maas, mock_osc): self.assertEqual( mock_ironic.return_value, factory.get_helper()) self.config(url="fake_maas_url", group="maas_client") self.assertEqual( mock_maas.return_value, factory.get_helper()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/metal_helper/test_ironic.py0000664000175000017500000001113600000000000026303 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from watcher.common.metal_helper import constants as m_constants from watcher.common.metal_helper import ironic from watcher.tests import base class TestIronicNode(base.TestCase): def setUp(self): super().setUp() self._wrapped_node = mock.Mock() self._nova_node = mock.Mock() self._ironic_client = mock.Mock() self._node = ironic.IronicNode( self._wrapped_node, self._nova_node, self._ironic_client) def test_get_power_state(self): states = ( "power on", "power off", "rebooting", "soft power off", "soft reboot", 'SomeOtherState') type(self._wrapped_node).power_state = mock.PropertyMock( side_effect=states) expected_states = ( m_constants.PowerState.ON, m_constants.PowerState.OFF, m_constants.PowerState.ON, m_constants.PowerState.OFF, m_constants.PowerState.ON, m_constants.PowerState.UNKNOWN) for expected_state in expected_states: actual_state = self._node.get_power_state() self.assertEqual(expected_state, actual_state) def test_get_id(self): self.assertEqual( self._wrapped_node.uuid, self._node.get_id()) def test_power_on(self): self._node.power_on() self._ironic_client.node.set_power_state.assert_called_once_with( self._wrapped_node.uuid, "on") def test_power_off(self): self._node.power_off() self._ironic_client.node.set_power_state.assert_called_once_with( self._wrapped_node.uuid, "off") class TestIronicHelper(base.TestCase): def setUp(self): super().setUp() self._mock_osc = mock.Mock() self._mock_nova_client = self._mock_osc.nova.return_value self._mock_ironic_client = self._mock_osc.ironic.return_value self._helper = ironic.IronicHelper(osc=self._mock_osc) def test_list_compute_nodes(self): mock_machines = [ mock.Mock( extra=dict(compute_node_id=mock.sentinel.compute_node_id)), mock.Mock( extra=dict(compute_node_id=mock.sentinel.compute_node_id2)), mock.Mock( extra=dict()) ] mock_hypervisor = mock.Mock() self._mock_ironic_client.node.list.return_value = mock_machines self._mock_ironic_client.node.get.side_effect = mock_machines self._mock_nova_client.hypervisors.get.side_effect = ( 
mock_hypervisor, None) out_nodes = self._helper.list_compute_nodes() self.assertEqual(1, len(out_nodes)) out_node = out_nodes[0] self.assertIsInstance(out_node, ironic.IronicNode) self.assertEqual(mock_hypervisor, out_node._nova_node) self.assertEqual(mock_machines[0], out_node._ironic_node) self.assertEqual(self._mock_ironic_client, out_node._ironic_client) def test_get_node(self): mock_machine = mock.Mock( extra=dict(compute_node_id=mock.sentinel.compute_node_id)) self._mock_ironic_client.node.get.return_value = mock_machine out_node = self._helper.get_node(mock.sentinel.id) self.assertEqual(self._mock_nova_client.hypervisors.get.return_value, out_node._nova_node) self.assertEqual(self._mock_ironic_client, out_node._ironic_client) self.assertEqual(mock_machine, out_node._ironic_node) def test_get_node_not_a_hypervisor(self): mock_machine = mock.Mock(extra=dict(compute_node_id=None)) self._mock_ironic_client.node.get.return_value = mock_machine out_node = self._helper.get_node(mock.sentinel.id) self._mock_nova_client.hypervisors.get.assert_not_called() self.assertIsNone(out_node._nova_node) self.assertEqual(self._mock_ironic_client, out_node._ironic_client) self.assertEqual(mock_machine, out_node._ironic_node) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/metal_helper/test_maas.py0000664000175000017500000001042400000000000025740 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
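# Illustrative sketch: the ironic test above drives successive power
# states through mock.PropertyMock(side_effect=...). Attached to the
# *type*, every attribute read consumes the next value (hypothetical
# _Node class):

from unittest import mock as _mock


class _Node(object):
    power_state = "unknown"


_node = _Node()
type(_node).power_state = _mock.PropertyMock(side_effect=["on", "off"])
assert _node.power_state == "on"
assert _node.power_state == "off"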
from unittest import mock try: from maas.client import enum as maas_enum except ImportError: maas_enum = None from watcher.common.metal_helper import constants as m_constants from watcher.common.metal_helper import maas from watcher.tests import base class TestMaasNode(base.TestCase): def setUp(self): super().setUp() self._wrapped_node = mock.Mock() self._nova_node = mock.Mock() self._maas_client = mock.Mock() self._node = maas.MaasNode( self._wrapped_node, self._nova_node, self._maas_client) def test_get_power_state(self): if not maas_enum: self.skipTest("python-libmaas not installed.") self._wrapped_node.query_power_state.side_effect = ( maas_enum.PowerState.ON, maas_enum.PowerState.OFF, maas_enum.PowerState.ERROR, maas_enum.PowerState.UNKNOWN, 'SomeOtherState') expected_states = ( m_constants.PowerState.ON, m_constants.PowerState.OFF, m_constants.PowerState.ERROR, m_constants.PowerState.UNKNOWN, m_constants.PowerState.UNKNOWN) for expected_state in expected_states: actual_state = self._node.get_power_state() self.assertEqual(expected_state, actual_state) def test_get_id(self): self.assertEqual( self._wrapped_node.system_id, self._node.get_id()) def test_power_on(self): self._node.power_on() self._wrapped_node.power_on.assert_called_once_with() def test_power_off(self): self._node.power_off() self._wrapped_node.power_off.assert_called_once_with() class TestMaasHelper(base.TestCase): def setUp(self): super().setUp() self._mock_osc = mock.Mock() self._mock_nova_client = self._mock_osc.nova.return_value self._mock_maas_client = self._mock_osc.maas.return_value self._helper = maas.MaasHelper(osc=self._mock_osc) def test_list_compute_nodes(self): compute_fqdn = "compute-0" # some other MAAS node, not a Nova node ctrl_fqdn = "ctrl-1" mock_machines = [ mock.Mock(fqdn=compute_fqdn, system_id=mock.sentinel.compute_node_id), mock.Mock(fqdn=ctrl_fqdn, system_id=mock.sentinel.ctrl_node_id), ] mock_hypervisors = [ mock.Mock(hypervisor_hostname=compute_fqdn), ] self._mock_maas_client.machines.list.return_value = mock_machines self._mock_nova_client.hypervisors.list.return_value = mock_hypervisors out_nodes = self._helper.list_compute_nodes() self.assertEqual(1, len(out_nodes)) out_node = out_nodes[0] self.assertIsInstance(out_node, maas.MaasNode) self.assertEqual(mock.sentinel.compute_node_id, out_node.get_id()) self.assertEqual(compute_fqdn, out_node.get_hypervisor_hostname()) def test_get_node(self): mock_machine = mock.Mock(fqdn='compute-0') self._mock_maas_client.machines.get.return_value = mock_machine mock_compute_nodes = [ mock.Mock(hypervisor_hostname="compute-011"), mock.Mock(hypervisor_hostname="compute-0"), mock.Mock(hypervisor_hostname="compute-01"), ] self._mock_nova_client.hypervisors.search.return_value = ( mock_compute_nodes) out_node = self._helper.get_node(mock.sentinel.id) self.assertEqual(mock_compute_nodes[1], out_node._nova_node) self.assertEqual(self._mock_maas_client, out_node._maas_client) self.assertEqual(mock_machine, out_node._maas_node) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_cinder_helper.py0000664000175000017500000004051400000000000025164 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from unittest import mock from http import HTTPStatus import time from cinderclient import exceptions as cinder_exception from watcher.common import cinder_helper from watcher.common import clients from watcher.common import exception from watcher.common import utils from watcher.tests import base @mock.patch.object(clients.OpenStackClients, 'cinder') class TestCinderHelper(base.TestCase): @staticmethod def fake_storage_node(**kwargs): node = mock.MagicMock() node.binary = kwargs.get('binary', 'cinder-volume') node.host = kwargs.get('name', 'host@backend') return node def test_get_storage_node_list(self, mock_cinder): node1 = self.fake_storage_node() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [node1] cinder_util.get_storage_node_list() cinder_util.cinder.services.list.assert_called_once_with( binary='cinder-volume') def test_get_storage_node_by_name_success(self, mock_cinder): node1 = self.fake_storage_node() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [node1] node = cinder_util.get_storage_node_by_name('host@backend') self.assertEqual(node, node1) def test_get_storage_node_by_name_failure(self, mock_cinder): node1 = self.fake_storage_node() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [node1] self.assertRaisesRegex( exception.StorageNodeNotFound, "The storage node failure could not be found", cinder_util.get_storage_node_by_name, 'failure') @staticmethod def fake_pool(**kwargs): pool = mock.MagicMock() pool.name = kwargs.get('name', 'host@backend#pool') return pool def test_get_storage_pool_list(self, mock_cinder): pool = self.fake_pool() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.pools.list.return_value = [pool] cinder_util.get_storage_pool_list() cinder_util.cinder.pools.list.assert_called_once_with(detailed=True) def test_get_storage_pool_by_name_success(self, mock_cinder): pool1 = self.fake_pool() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.pools.list.return_value = [pool1] pool = cinder_util.get_storage_pool_by_name('host@backend#pool') self.assertEqual(pool, pool1) def test_get_storage_pool_by_name_failure(self, mock_cinder): pool1 = self.fake_pool() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [pool1] self.assertRaisesRegex( exception.PoolNotFound, "The pool failure could not be found", cinder_util.get_storage_pool_by_name, 'failure') @staticmethod def fake_volume_type(**kwargs): volume_type = mock.MagicMock() volume_type.name = kwargs.get('name', 'fake_type') extra_specs = {'volume_backend_name': 'backend'} volume_type.extra_specs = kwargs.get('extra_specs', extra_specs) return volume_type def test_get_volume_type_list(self, mock_cinder): volume_type1 = self.fake_volume_type() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.volume_types.list.return_value = [volume_type1] cinder_util.get_volume_type_list() cinder_util.cinder.volume_types.list.assert_called_once_with() def test_get_volume_type_by_backendname_with_backend_exist( self, 
mock_cinder): volume_type1 = self.fake_volume_type() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.volume_types.list.return_value = [volume_type1] volume_type_name = cinder_util.get_volume_type_by_backendname( 'backend') self.assertEqual(volume_type_name[0], volume_type1.name) def test_get_volume_type_by_backendname_with_no_backend_exist( self, mock_cinder): volume_type1 = self.fake_volume_type() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.volume_types.list.return_value = [volume_type1] volume_type_name = cinder_util.get_volume_type_by_backendname( 'nobackend') self.assertEqual([], volume_type_name) @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock() volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba') volume.name = kwargs.get('name', 'fakename') volume.size = kwargs.get('size', '1') volume.status = kwargs.get('status', 'available') volume.snapshot_id = kwargs.get('snapshot_id', None) volume.availability_zone = kwargs.get('availability_zone', 'nova') volume.volume_type = kwargs.get('volume_type', 'fake_type') return volume @mock.patch.object(time, 'sleep', mock.Mock()) def test_migrate_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'success') cinder_util.cinder.volumes.get.return_value = volume volume_type = self.fake_volume_type() cinder_util.cinder.volume_types.list.return_value = [volume_type] result = cinder_util.migrate(volume, 'host@backend#pool') self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_migrate_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume volume_type = self.fake_volume_type() volume_type.name = 'notbackend' cinder_util.cinder.volume_types.list.return_value = [volume_type] self.assertRaisesRegex( exception.Invalid, "Volume type must be same for migrating", cinder_util.migrate, volume, 'host@backend#pool') volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'error') cinder_util.cinder.volumes.get.return_value = volume volume_type = self.fake_volume_type() cinder_util.cinder.volume_types.list.return_value = [volume_type] result = cinder_util.migrate(volume, 'host@backend#pool') self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_retype_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'success') cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.retype(volume, 'notfake_type') self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_retype_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'success') cinder_util.cinder.volumes.get.return_value = volume self.assertRaisesRegex( exception.Invalid, "Volume type must be different for retyping", cinder_util.retype, volume, 'fake_type') volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'error') cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.retype(volume, 'notfake_type') 
        self.assertFalse(result)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_create_volume_success(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        cinder_util.cinder.volumes.get.return_value = volume
        cinder_util.cinder.volumes.create.return_value = volume
        new_volume = cinder_util.create_volume(
            cinder_util.cinder, volume, 'fake_type')
        self.assertEqual(new_volume, volume)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_create_volume_fail(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        setattr(volume, 'status', 'fake_status')
        cinder_util.cinder.volumes.get.return_value = volume
        cinder_util.cinder.volumes.create.return_value = volume
        self.assertRaisesRegex(
            Exception,
            "Failed to create volume",
            cinder_util.create_volume,
            cinder_util.cinder, volume, 'fake_type',
            retry=2, retry_interval=1)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_delete_volume_success(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        cinder_util.cinder.volumes.get.return_value = volume
        cinder_util.cinder.volumes.create.return_value = volume
        cinder_util.check_volume_deleted = mock.MagicMock(return_value=True)
        result = cinder_util.delete_volume(volume)
        self.assertIsNone(result)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_delete_volume_fail(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        setattr(volume, 'status', 'fake_status')
        cinder_util.cinder.volumes.get.return_value = volume
        cinder_util.cinder.volumes.create.return_value = volume
        cinder_util.check_volume_deleted = mock.MagicMock(return_value=False)
        self.assertRaisesRegex(
            Exception,
            "Failed to delete volume",
            cinder_util.delete_volume,
            volume)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_can_get_volume_success(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        cinder_util.get_volume = mock.MagicMock(return_value=volume)
        result = cinder_util._can_get_volume(volume.id)
        self.assertTrue(result)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_can_get_volume_fail(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        cinder_util.get_volume = mock.MagicMock()
        cinder_util.get_volume.side_effect = \
            cinder_exception.NotFound(HTTPStatus.NOT_FOUND)
        result = cinder_util._can_get_volume(volume.id)
        self.assertFalse(result)
        cinder_util.get_volume = mock.MagicMock(return_value=None)
        self.assertRaises(
            Exception,
            cinder_util._can_get_volume,
            volume.id)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_has_snapshot_success(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        volume.snapshot_id = utils.generate_uuid()
        cinder_util.get_volume = mock.MagicMock(return_value=volume)
        result = cinder_util._has_snapshot(volume)
        self.assertTrue(result)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_has_snapshot_fail(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        volume.snapshot_id = None
        cinder_util.get_volume = mock.MagicMock(return_value=volume)
        result = cinder_util._has_snapshot(volume)
        self.assertFalse(result)

    @mock.patch.object(time, 'sleep', mock.Mock())
    def test_get_volume_success(self, mock_cinder):
        cinder_util = cinder_helper.CinderHelper()
        volume = self.fake_volume()
        cinder_util.cinder.volumes.get.return_value = volume
        result = cinder_util.get_volume(volume)
        self.assertTrue(result)
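    # Editor's aside (illustrative only): every test above stacks
    # ``@mock.patch.object(time, 'sleep', mock.Mock())`` so that the
    # helper's retry loops finish instantly. A retry helper of the form
    #
    #     def _wait(pred, retry=3, retry_interval=1):
    #         for _ in range(retry):
    #             if pred():
    #                 return True
    #             time.sleep(retry_interval)
    #         return False
    #
    # keeps its control flow intact under the patch while the sleeps
    # become no-ops, which is what makes retry=2, retry_interval=1
    # cheap to exercise.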
@mock.patch.object(time, 'sleep', mock.Mock()) def test_get_volume_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() side_effect = cinder_exception.NotFound(HTTPStatus.NOT_FOUND) cinder_util.cinder.volumes.get.side_effect = side_effect cinder_util.cinder.volumes.find.return_value = False result = cinder_util.get_volume(volume) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_volume_deleted_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume cinder_util._can_get_volume = mock.MagicMock(return_value=None) result = cinder_util.check_volume_deleted( volume, retry=2, retry_interval=1) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_volume_deleted_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume cinder_util._can_get_volume = mock.MagicMock(return_value=volume) result = cinder_util.check_volume_deleted( volume, retry=2, retry_interval=1) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_migrated_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'migration_status', 'success') setattr(volume, 'os-vol-host-attr:host', 'host@backend#pool') cinder_util.cinder.volumes.get.return_value = volume cinder_util.check_volume_deleted = mock.MagicMock(return_value=True) result = cinder_util.check_migrated(volume) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_migrated_fail(self, mock_cinder): def side_effect(volume): if isinstance(volume, str): volume = self.fake_volume() setattr(volume, 'migration_status', 'error') elif volume.id is None: setattr(volume, 'migration_status', 'fake_status') setattr(volume, 'id', utils.generate_uuid()) return volume cinder_util = cinder_helper.CinderHelper() # verify that the method check_migrated will return False when the # status of migration_status is error. volume = self.fake_volume() setattr(volume, 'migration_status', 'error') setattr(volume, 'os-vol-host-attr:host', 'source_node') cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.check_migrated(volume, retry_interval=1) self.assertFalse(result) # verify that the method check_migrated will return False when the # status of migration_status is in other cases. volume = self.fake_volume() setattr(volume, 'migration_status', 'success') setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'id', None) cinder_util.get_volume = mock.MagicMock() cinder_util.get_volume.side_effect = side_effect result = cinder_util.check_migrated(volume, retry_interval=1) self.assertFalse(result) # verify that the method check_migrated will return False when the # return_value of method check_volume_deleted is False. 
volume = self.fake_volume() setattr(volume, 'migration_status', 'success') setattr(volume, 'os-vol-host-attr:host', 'source_node') cinder_util.cinder.volumes.get.return_value = volume cinder_util.check_volume_deleted = mock.MagicMock(return_value=False) cinder_util.get_deleting_volume = mock.MagicMock(return_value=volume) result = cinder_util.check_migrated(volume, retry_interval=1) self.assertFalse(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_clients.py0000664000175000017500000004263200000000000024025 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinderclient import client as ciclient from cinderclient.v3 import client as ciclient_v3 from glanceclient import client as glclient from gnocchiclient import client as gnclient from gnocchiclient.v1 import client as gnclient_v1 from ironicclient import client as irclient from ironicclient.v1 import client as irclient_v1 from keystoneauth1 import adapter as ka_adapter from keystoneauth1 import loading as ka_loading from monascaclient import client as monclient from monascaclient.v2_0 import client as monclient_v2 from neutronclient.neutron import client as netclient from neutronclient.v2_0 import client as netclient_v2 from novaclient import client as nvclient from watcher.common import clients from watcher import conf from watcher.tests import base CONF = conf.CONF class TestClients(base.TestCase): def _register_watcher_clients_auth_opts(self): _AUTH_CONF_GROUP = 'watcher_clients_auth' ka_loading.register_auth_conf_options(CONF, _AUTH_CONF_GROUP) ka_loading.register_session_conf_options(CONF, _AUTH_CONF_GROUP) CONF.set_override('auth_type', 'password', group=_AUTH_CONF_GROUP) # ka_loading.load_auth_from_conf_options(CONF, _AUTH_CONF_GROUP) # ka_loading.load_session_from_conf_options(CONF, _AUTH_CONF_GROUP) # CONF.set_override( # 'auth-url', 'http://server.ip:5000', group=_AUTH_CONF_GROUP) # If we don't clean up the _AUTH_CONF_GROUP conf options, then other # tests that run after this one will fail, complaining about required # options that _AUTH_CONF_GROUP wants. 
def cleanup_conf_from_loading(): # oslo_config doesn't seem to allow unregistering groups through a # single method, so we do this instead CONF.reset() del CONF._groups[_AUTH_CONF_GROUP] self.addCleanup(cleanup_conf_from_loading) def reset_register_opts_mock(conf_obj, original_method): conf_obj.register_opts = original_method original_register_opts = CONF.register_opts self.addCleanup(reset_register_opts_mock, CONF, original_register_opts) expected = {'username': 'foousername', 'password': 'foopassword', 'auth_url': 'http://server.ip:5000', 'cafile': None, 'certfile': None, 'keyfile': None, 'insecure': False, 'user_domain_id': 'foouserdomainid', 'project_domain_id': 'fooprojdomainid'} # Because some of the conf options for auth plugins are not registered # until right before they are loaded, and because the method that does # the actual loading of the conf option values is an anonymous method # (see _getter method of load_from_conf_options in # keystoneauth1.loading.conf.py), we need to manually monkey patch # the register opts method so that we can override the conf values to # our custom values. def mock_register_opts(*args, **kwargs): ret = original_register_opts(*args, **kwargs) if 'group' in kwargs and kwargs['group'] == _AUTH_CONF_GROUP: for key, value in expected.items(): CONF.set_override(key, value, group=_AUTH_CONF_GROUP) return ret CONF.register_opts = mock_register_opts def test_get_keystone_session(self): self._register_watcher_clients_auth_opts() osc = clients.OpenStackClients() expected = {'username': 'foousername', 'password': 'foopassword', 'auth_url': 'http://server.ip:5000', 'user_domain_id': 'foouserdomainid', 'project_domain_id': 'fooprojdomainid'} sess = osc.session self.assertEqual(expected['auth_url'], sess.auth.auth_url) self.assertEqual(expected['username'], sess.auth._username) self.assertEqual(expected['password'], sess.auth._password) self.assertEqual(expected['user_domain_id'], sess.auth._user_domain_id) self.assertEqual(expected['project_domain_id'], sess.auth._project_domain_id) @mock.patch.object(nvclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._nova = None osc.nova() mock_call.assert_called_once_with( CONF.nova_client.api_version, endpoint_type=CONF.nova_client.endpoint_type, region_name=CONF.nova_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_diff_vers(self, mock_session): CONF.set_override('api_version', '2.60', group='nova_client') osc = clients.OpenStackClients() osc._nova = None osc.nova() self.assertEqual('2.60', osc.nova().api_version.get_string()) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_bad_min_version(self, mock_session): CONF.set_override('api_version', '2.47', group='nova_client') osc = clients.OpenStackClients() osc._nova = None ex = self.assertRaises(ValueError, osc.nova) self.assertIn('Invalid nova_client.api_version 2.47', str(ex)) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_diff_endpoint(self, mock_session): CONF.set_override('endpoint_type', 'publicURL', group='nova_client') osc = clients.OpenStackClients() osc._nova = None osc.nova() self.assertEqual('publicURL', osc.nova().client.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_cached(self, mock_session): osc = clients.OpenStackClients() osc._nova = None nova = osc.nova() nova_cached = 
osc.nova() self.assertEqual(nova, nova_cached) @mock.patch.object(glclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._glance = None osc.glance() mock_call.assert_called_once_with( CONF.glance_client.api_version, interface=CONF.glance_client.endpoint_type, region_name=CONF.glance_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance_diff_vers(self, mock_session): CONF.set_override('api_version', '1', group='glance_client') osc = clients.OpenStackClients() osc._glance = None osc.glance() self.assertEqual(1.0, osc.glance().version) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance_diff_endpoint(self, mock_session): CONF.set_override('endpoint_type', 'internalURL', group='glance_client') osc = clients.OpenStackClients() osc._glance = None osc.glance() self.assertEqual('internalURL', osc.glance().http_client.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance_cached(self, mock_session): osc = clients.OpenStackClients() osc._glance = None glance = osc.glance() glance_cached = osc.glance() self.assertEqual(glance, glance_cached) @mock.patch.object(gnclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._gnocchi = None osc.gnocchi() mock_call.assert_called_once_with( CONF.gnocchi_client.api_version, adapter_options={ "interface": CONF.gnocchi_client.endpoint_type, "region_name": CONF.gnocchi_client.region_name}, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi_diff_vers(self, mock_session): # gnocchiclient currently only has one version (v1) CONF.set_override('api_version', '1', group='gnocchi_client') osc = clients.OpenStackClients() osc._gnocchi = None osc.gnocchi() self.assertEqual(gnclient_v1.Client, type(osc.gnocchi())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi_diff_endpoint(self, mock_session): # gnocchiclient currently only has one version (v1) CONF.set_override('endpoint_type', 'publicURL', group='gnocchi_client') osc = clients.OpenStackClients() osc._gnocchi = None osc.gnocchi() self.assertEqual('publicURL', osc.gnocchi().api.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi_cached(self, mock_session): osc = clients.OpenStackClients() osc._gnocchi = None gnocchi = osc.gnocchi() gnocchi_cached = osc.gnocchi() self.assertEqual(gnocchi, gnocchi_cached) @mock.patch.object(ciclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._cinder = None osc.cinder() mock_call.assert_called_once_with( CONF.cinder_client.api_version, endpoint_type=CONF.cinder_client.endpoint_type, region_name=CONF.cinder_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder_diff_vers(self, mock_session): CONF.set_override('api_version', '3', group='cinder_client') osc = clients.OpenStackClients() osc._cinder = None osc.cinder() self.assertEqual(ciclient_v3.Client, type(osc.cinder())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder_diff_endpoint(self, mock_session): CONF.set_override('endpoint_type', 'internalURL', 
group='cinder_client') osc = clients.OpenStackClients() osc._cinder = None osc.cinder() self.assertEqual('internalURL', osc.cinder().client.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder_cached(self, mock_session): osc = clients.OpenStackClients() osc._cinder = None cinder = osc.cinder() cinder_cached = osc.cinder() self.assertEqual(cinder, cinder_cached) @mock.patch.object(netclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._neutron = None osc.neutron() mock_call.assert_called_once_with( CONF.neutron_client.api_version, endpoint_type=CONF.neutron_client.endpoint_type, region_name=CONF.neutron_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron_diff_vers(self, mock_session): '''neutronclient currently only has one version (v2)''' CONF.set_override('api_version', '2.0', group='neutron_client') osc = clients.OpenStackClients() osc._neutron = None osc.neutron() self.assertEqual(netclient_v2.Client, type(osc.neutron())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron_diff_endpoint(self, mock_session): '''neutronclient currently only has one version (v2)''' CONF.set_override('endpoint_type', 'internalURL', group='neutron_client') osc = clients.OpenStackClients() osc._neutron = None osc.neutron() self.assertEqual('internalURL', osc.neutron().httpclient.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron_cached(self, mock_session): osc = clients.OpenStackClients() osc._neutron = None neutron = osc.neutron() neutron_cached = osc.neutron() self.assertEqual(neutron, neutron_cached) @mock.patch.object(monclient, 'Client') @mock.patch.object(ka_loading, 'load_session_from_conf_options') def test_clients_monasca(self, mock_session, mock_call): mock_session.return_value = mock.Mock( get_endpoint=mock.Mock(return_value='test_endpoint'), get_token=mock.Mock(return_value='test_token'),) self._register_watcher_clients_auth_opts() osc = clients.OpenStackClients() osc._monasca = None osc.monasca() mock_call.assert_called_once_with( CONF.monasca_client.api_version, 'test_endpoint', auth_url='http://server.ip:5000', cert_file=None, insecure=False, key_file=None, keystone_timeout=None, os_cacert=None, password='foopassword', service_type='monitoring', token='test_token', username='foousername') @mock.patch.object(ka_loading, 'load_session_from_conf_options') def test_clients_monasca_diff_vers(self, mock_session): mock_session.return_value = mock.Mock( get_endpoint=mock.Mock(return_value='test_endpoint'), get_token=mock.Mock(return_value='test_token'),) self._register_watcher_clients_auth_opts() CONF.set_override('api_version', '2_0', group='monasca_client') osc = clients.OpenStackClients() osc._monasca = None osc.monasca() self.assertEqual(monclient_v2.Client, type(osc.monasca())) @mock.patch.object(ka_loading, 'load_session_from_conf_options') def test_clients_monasca_cached(self, mock_session): mock_session.return_value = mock.Mock( get_endpoint=mock.Mock(return_value='test_endpoint'), get_token=mock.Mock(return_value='test_token'),) self._register_watcher_clients_auth_opts() osc = clients.OpenStackClients() osc._monasca = None monasca = osc.monasca() monasca_cached = osc.monasca() self.assertEqual(monasca, monasca_cached) @mock.patch.object(irclient, 'Client') @mock.patch.object(clients.OpenStackClients, 
'session') def test_clients_ironic(self, mock_session, mock_call): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url osc = clients.OpenStackClients() osc._ironic = None osc.ironic() mock_call.assert_called() @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic_diff_vers(self, mock_session): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url CONF.set_override('api_version', '1', group='ironic_client') osc = clients.OpenStackClients() osc._ironic = None osc.ironic() self.assertEqual(irclient_v1.Client, type(osc.ironic())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic_diff_endpoint(self, mock_session): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url osc = clients.OpenStackClients() osc._ironic = None osc.ironic() mock_session.get_endpoint.assert_called_with( interface='publicURL', region_name=None, service_type='baremetal') CONF.set_override('endpoint_type', 'internalURL', group='ironic_client') osc._ironic = None osc.ironic() mock_session.get_endpoint.assert_called_with( interface='internalURL', region_name=None, service_type='baremetal') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic_cached(self, mock_session): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url osc = clients.OpenStackClients() osc._ironic = None ironic = osc.ironic() ironic_cached = osc.ironic() self.assertEqual(ironic, ironic_cached) @mock.patch.object(ka_adapter, 'Adapter') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_placement(self, mock_session, mock_call): osc = clients.OpenStackClients() osc.placement() headers = {'accept': 'application/json'} mock_call.assert_called_once_with( session=mock_session, service_type='placement', default_microversion=CONF.placement_client.api_version, interface=CONF.placement_client.interface, region_name=CONF.placement_client.region_name, additional_headers=headers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_ironic_helper.py0000664000175000017500000000416200000000000025202 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors:Yumeng Bao # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
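# Illustrative sketch: the *_cached tests in test_clients above all
# assert the same memoisation contract -- build the client once, then
# hand back the stored object. A hypothetical minimal form:

from unittest import mock as _mock


class _Clients(object):
    def __init__(self):
        self._ironic = None

    def ironic(self):
        if self._ironic is None:
            # Stand-in for the real client construction.
            self._ironic = _mock.Mock(name="ironic-client")
        return self._ironic


_osc = _Clients()
assert _osc.ironic() is _osc.ironic()  # same object on every call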
# from unittest import mock from watcher.common import clients from watcher.common import exception from watcher.common import ironic_helper from watcher.common import utils as w_utils from watcher.tests import base class TestIronicHelper(base.TestCase): def setUp(self): super(TestIronicHelper, self).setUp() osc = clients.OpenStackClients() p_ironic = mock.patch.object(osc, 'ironic') p_ironic.start() self.addCleanup(p_ironic.stop) self.ironic_util = ironic_helper.IronicHelper(osc=osc) @staticmethod def fake_ironic_node(): node = mock.MagicMock() node.uuid = w_utils.generate_uuid() return node def test_get_ironic_node_list(self): node1 = self.fake_ironic_node() self.ironic_util.ironic.node.list.return_value = [node1] rt_nodes = self.ironic_util.get_ironic_node_list() self.assertEqual(rt_nodes, [node1]) def test_get_ironic_node_by_uuid_success(self): node1 = self.fake_ironic_node() self.ironic_util.ironic.node.get.return_value = node1 node = self.ironic_util.get_ironic_node_by_uuid(node1.uuid) self.assertEqual(node, node1) def test_get_ironic_node_by_uuid_failure(self): self.ironic_util.ironic.node.get.return_value = None self.assertRaisesRegex( exception.IronicNodeNotFound, "The ironic node node1 could not be found", self.ironic_util.get_ironic_node_by_uuid, 'node1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_nova_helper.py0000664000175000017500000006755400000000000024700 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
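# Illustrative sketch: the ironic-helper test above relies on
# assertRaisesRegex, which checks the exception type and matches its
# message against a regular expression (hypothetical _lookup helper):

import unittest as _unittest


class _TestLookup(_unittest.TestCase):
    def test_missing_node_message(self):
        def _lookup(node_id):
            raise LookupError(
                "The ironic node %s could not be found" % node_id)

        self.assertRaisesRegex(
            LookupError, "node node1 could not be found",
            _lookup, "node1")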
#

import time
from unittest import mock

from novaclient import api_versions
import glanceclient.exc as glexceptions
import novaclient.exceptions as nvexceptions

from watcher.common import clients
from watcher.common import exception
from watcher.common import nova_helper
from watcher.common import utils
from watcher.tests import base


@mock.patch.object(clients.OpenStackClients, 'nova')
@mock.patch.object(clients.OpenStackClients, 'neutron')
@mock.patch.object(clients.OpenStackClients, 'cinder')
@mock.patch.object(clients.OpenStackClients, 'glance')
class TestNovaHelper(base.TestCase):

    def setUp(self):
        super(TestNovaHelper, self).setUp()
        self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe"
        self.source_node = "ldev-indeedsrv005"
        self.destination_node = "ldev-indeedsrv006"
        self.flavor_name = "x1"

    @staticmethod
    def fake_server(*args, **kwargs):
        server = mock.MagicMock()
        server.id = args[0]
        server.status = 'ACTIVE'
        return server

    @staticmethod
    def fake_hypervisor(*args, **kwargs):
        hypervisor = mock.MagicMock()
        hypervisor.id = args[0]
        service_dict = {"host": args[1]}
        hypervisor.service = service_dict
        hypervisor.hypervisor_hostname = args[1]
        hypervisor.hypervisor_type = kwargs.pop('hypervisor_type', 'QEMU')
        return hypervisor

    @staticmethod
    def fake_migration(*args, **kwargs):
        migration = mock.MagicMock()
        migration.id = args[0]
        return migration

    @staticmethod
    def fake_nova_find_list(nova_util, fake_find=None, fake_list=None):
        nova_util.nova.servers.get.return_value = fake_find
        # An explicit None means "no servers": stub an empty list.
        if fake_list is None:
            nova_util.nova.servers.list.return_value = []
        else:
            nova_util.nova.servers.list.return_value = [fake_list]

    @staticmethod
    def fake_nova_hypervisor_list(nova_util, fake_find=None, fake_list=None):
        nova_util.nova.hypervisors.get.return_value = fake_find
        nova_util.nova.hypervisors.list.return_value = fake_list

    @staticmethod
    def fake_nova_migration_list(nova_util, fake_list=None):
        # Stub the server_migrations manager; None means no migrations.
        if fake_list is None:
            nova_util.nova.server_migrations.list.return_value = None
        else:
            nova_util.nova.server_migrations.list.return_value = [fake_list]

    @staticmethod
    def fake_live_migrate(server, *args, **kwargs):
        def side_effect(*args, **kwargs):
            setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2")

        server.live_migrate.side_effect = side_effect

    @staticmethod
    def fake_confirm_resize(server, *args, **kwargs):
        def side_effect(*args, **kwargs):
            setattr(server, 'status', 'ACTIVE')

        server.confirm_resize.side_effect = side_effect

    @staticmethod
    def fake_cold_migrate(server, *args, **kwargs):
        def side_effect(*args, **kwargs):
            setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2")
            setattr(server, 'status', 'VERIFY_RESIZE')

        server.migrate.side_effect = side_effect

    def test_get_compute_node_by_hostname(
            self, mock_glance, mock_cinder, mock_neutron, mock_nova):
        nova_util = nova_helper.NovaHelper()
        hypervisor_id = utils.generate_uuid()
        hypervisor_name = "fake_hypervisor_1"
        hypervisor = self.fake_hypervisor(hypervisor_id, hypervisor_name)
        nova_util.nova.hypervisors.search.return_value = [hypervisor]
        # verify that the compute node can be obtained normally by name
        self.assertEqual(
            nova_util.get_compute_node_by_hostname(hypervisor_name),
            hypervisor)
        # verify that looking up a compute node by a wrong name
        # raises an exception.
        self.assertRaises(
            exception.ComputeNodeNotFound,
            nova_util.get_compute_node_by_hostname,
            "exception_hypervisor_1")
        # verify that an empty lookup result also raises an exception.
nova_util.nova.hypervisors.search.return_value = [] self.assertRaises( exception.ComputeNodeNotFound, nova_util.get_compute_node_by_hostname, hypervisor_name) def test_get_compute_node_by_hostname_multiple_matches(self, *mocks): # Tests a scenario where get_compute_node_by_name returns multiple # hypervisors and we have to pick the exact match based on the given # compute service hostname. nova_util = nova_helper.NovaHelper() nodes = [] # compute1 is a substring of compute10 to trigger the fuzzy match. for hostname in ('compute1', 'compute10'): node = mock.MagicMock() node.id = utils.generate_uuid() node.hypervisor_hostname = hostname node.service = {'host': hostname} nodes.append(node) # We should get back exact matches based on the service host. nova_util.nova.hypervisors.search.return_value = nodes for index, name in enumerate(['compute1', 'compute10']): result = nova_util.get_compute_node_by_hostname(name) self.assertIs(nodes[index], result) def test_get_compute_node_by_uuid( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() hypervisor_id = utils.generate_uuid() hypervisor_name = "fake_hypervisor_1" hypervisor = self.fake_hypervisor(hypervisor_id, hypervisor_name) nova_util.nova.hypervisors.get.return_value = hypervisor # verify that the compute node can be obtained normally by id self.assertEqual( nova_util.get_compute_node_by_uuid(hypervisor_id), hypervisor) def test_get_instance_list(self, *args): nova_util = nova_helper.NovaHelper() # Call it once with no filters. with mock.patch.object(nova_util, 'nova') as nova_mock: result = nova_util.get_instance_list() nova_mock.servers.list.assert_called_once_with( search_opts={'all_tenants': True}, marker=None, limit=-1) self.assertIs(result, nova_mock.servers.list.return_value) # Call it again with filters. with mock.patch.object(nova_util, 'nova') as nova_mock: result = nova_util.get_instance_list(filters={'host': 'fake-host'}) nova_mock.servers.list.assert_called_once_with( search_opts={'all_tenants': True, 'host': 'fake-host'}, marker=None, limit=-1) self.assertIs(result, nova_mock.servers.list.return_value) @mock.patch.object(time, 'sleep', mock.Mock()) def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance_id = utils.generate_uuid() server = self.fake_server(instance_id) setattr(server, 'OS-EXT-STS:vm_state', 'stopped') self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) result = nova_util.stop_instance(instance_id) self.assertTrue(result) setattr(server, 'OS-EXT-STS:vm_state', 'active') result = nova_util.stop_instance(instance_id) self.assertFalse(result) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) result = nova_util.stop_instance(instance_id) self.assertFalse(result) # verify that the method will return True when the state of instance # is in the expected state. setattr(server, 'OS-EXT-STS:vm_state', 'active') with mock.patch.object( nova_util, 'wait_for_instance_state', return_value=True ) as mock_instance_state: result = nova_util.stop_instance(instance_id) self.assertTrue(result) mock_instance_state.assert_called_once_with( mock.ANY, "stopped", 8, 10) # verify that the method stop_instance will return False when the # server is not available. 
nova_util.nova.servers.get.return_value = None result = nova_util.stop_instance(instance_id) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_delete_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance_id = utils.generate_uuid() # verify that the method will return False when the instance does # not exist. self.fake_nova_find_list(nova_util, fake_find=None, fake_list=None) result = nova_util.delete_instance(instance_id) self.assertFalse(result) # verify that the method will return True when the instance exists. server = self.fake_server(instance_id) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) result = nova_util.delete_instance(instance_id) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_resize_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'status', 'VERIFY_RESIZE') self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) is_success = nova_util.resize_instance(self.instance_uuid, self.flavor_name) self.assertTrue(is_success) setattr(server, 'status', 'SOMETHING_ELSE') is_success = nova_util.resize_instance(self.instance_uuid, self.flavor_name) self.assertFalse(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_live_migrate_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertTrue(is_success) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertFalse(is_success) # verify that the method will return False when the instance does # not exist. setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) self.fake_nova_find_list(nova_util, fake_find=None, fake_list=None) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertFalse(is_success) # verify that the method will return False when the instance status # is in other cases. 
setattr(server, 'status', 'fake_status') self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) is_success = nova_util.live_migrate_instance( self.instance_uuid, None ) self.assertFalse(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_live_migrate_instance_with_task_state( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) setattr(server, 'OS-EXT-STS:task_state', '') self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) time.sleep.assert_not_called() setattr(server, 'OS-EXT-STS:task_state', 'migrating') self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) time.sleep.assert_called_with(1) @mock.patch.object(time, 'sleep', mock.Mock()) def test_live_migrate_instance_no_destination_node( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) self.destination_node = None self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) self.fake_live_migrate(server) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertTrue(is_success) def test_watcher_non_live_migrate_instance_not_found( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() self.fake_nova_find_list(nova_util, fake_find=None, fake_list=None) is_success = nova_util.watcher_non_live_migrate_instance( self.instance_uuid, self.destination_node) self.assertFalse(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_abort_live_migrate_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) setattr(server, 'OS-EXT-STS:task_state', None) migration = self.fake_migration(2) self.fake_nova_migration_list(nova_util, fake_list=migration) self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) self.assertTrue(nova_util.abort_live_migrate( self.instance_uuid, self.source_node, self.destination_node)) setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) self.assertFalse(nova_util.abort_live_migrate( self.instance_uuid, self.source_node, self.destination_node)) setattr(server, 'status', 'ERROR') self.assertRaises( Exception, nova_util.abort_live_migrate, self.instance_uuid, self.source_node, self.destination_node) server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-STS:task_state', "fake_task_state") setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) self.fake_nova_migration_list(nova_util, fake_list=None) self.assertFalse(nova_util.abort_live_migrate( self.instance_uuid, self.source_node, self.destination_node)) def test_non_live_migrate_instance_no_destination_node( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) self.destination_node = None self.fake_nova_find_list(nova_util, fake_find=server, fake_list=server) self.fake_cold_migrate(server) 
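# fake_cold_migrate (just above) wires server.migrate() to flip the host
# attribute and leave the server in VERIFY_RESIZE; fake_confirm_resize
# (next line) then restores ACTIVE, so the two side effects together
# emulate a complete cold-migration cycle for the helper under test.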
self.fake_confirm_resize(server) is_success = nova_util.watcher_non_live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertTrue(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_create_image_from_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) image = mock.MagicMock() setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node) setattr(instance, 'OS-EXT-STS:vm_state', "stopped") self.fake_nova_find_list( nova_util, fake_find=instance, fake_list=instance) image_uuid = 'fake-image-uuid' nova_util.nova.servers.create_image.return_value = image glance_client = mock.MagicMock() mock_glance.return_value = glance_client glance_client.images = {image_uuid: image} instance = nova_util.create_image_from_instance( self.instance_uuid, "Cirros" ) self.assertIsNotNone(instance) nova_util.glance.images.get.return_value = None instance = nova_util.create_image_from_instance( self.instance_uuid, "Cirros" ) self.assertIsNone(instance) def test_enable_service_nova_compute(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() nova_services = nova_util.nova.services nova_services.enable.return_value = mock.MagicMock( status='enabled') result = nova_util.enable_service_nova_compute('nanjing') self.assertTrue(result) nova_services.enable.return_value = mock.MagicMock( status='disabled') result = nova_util.enable_service_nova_compute('nanjing') self.assertFalse(result) def test_disable_service_nova_compute(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() nova_services = nova_util.nova.services nova_services.disable_log_reason.return_value = mock.MagicMock( status='enabled') result = nova_util.disable_service_nova_compute('nanjing') self.assertFalse(result) nova_services.disable_log_reason.return_value = mock.MagicMock( status='disabled') result = nova_util.disable_service_nova_compute('nanjing') self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_create_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) nova_util.nova.servers.create.return_value = instance nova_util.nova.servers.get.return_value = instance create_instance = nova_util.create_instance(self.source_node) self.assertIsNotNone(create_instance) self.assertEqual(create_instance, instance) # verify that the method create_instance will return None when # the method findall raises exception. nova_util.nova.keypairs.findall.side_effect = nvexceptions.NotFound( 404) instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) nova_util.nova.keypairs.findall.side_effect = None # verify that the method create_instance will return None when # the method get raises exception. nova_util.glance.images.get.side_effect = glexceptions.NotFound(404) instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) nova_util.glance.images.get.side_effect = None # verify that the method create_instance will return None when # the method find raises exception. nova_util.nova.flavors.find.side_effect = nvexceptions.NotFound(404) instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) nova_util.nova.flavors.find.side_effect = None # verify that the method create_instance will return None when # the method get_security_group_id_from_name return None. 
with mock.patch.object( nova_util, 'get_security_group_id_from_name', return_value=None ) as mock_security_group_id: instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) mock_security_group_id.assert_called_once_with("default") # verify that the method create_instance will return None when # the method get_network_id_from_name return None. with mock.patch.object( nova_util, 'get_network_id_from_name', return_value=None ) as mock_get_network_id: instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) mock_get_network_id.assert_called_once_with("demo-net") # verify that the method create_instance will not return None when # the method wait_for_instance_status return True. with mock.patch.object( nova_util, 'wait_for_instance_status', return_value=True ) as mock_instance_status: instance = nova_util.create_instance(self.source_node) self.assertIsNotNone(instance) mock_instance_status.assert_called_once_with( mock.ANY, ('ACTIVE', 'ERROR'), 5, 10) @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock() volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba') volume.size = kwargs.get('size', '1') volume.status = kwargs.get('status', 'available') volume.snapshot_id = kwargs.get('snapshot_id', None) volume.availability_zone = kwargs.get('availability_zone', 'nova') return volume @mock.patch.object(time, 'sleep', mock.Mock()) def test_swap_volume(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=server) old_volume = self.fake_volume( status='in-use', attachments=[{'server_id': self.instance_uuid}]) new_volume = self.fake_volume( id=utils.generate_uuid(), status='in-use') result = nova_util.swap_volume(old_volume, new_volume) self.assertTrue(result) # verify that the method will return False when the status of # new_volume is 'fake-use'. new_volume = self.fake_volume( id=utils.generate_uuid(), status='fake-use') result = nova_util.swap_volume(old_volume, new_volume) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_wait_for_volume_status(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() # verify that the method will return True when the status of volume # is in the expected status. fake_volume_1 = self.fake_volume(status='in-use') nova_util.cinder.volumes.get.return_value = fake_volume_1 result = nova_util.wait_for_volume_status( fake_volume_1, "in-use", timeout=2) self.assertTrue(result) # verify that the method will raise Exception when the status of # volume is not in the expected status. fake_volume_2 = self.fake_volume(status='fake-use') nova_util.cinder.volumes.get.return_value = fake_volume_2 self.assertRaises( Exception, nova_util.wait_for_volume_status, fake_volume_1, "in-use", timeout=2) def test_check_nova_api_version(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() # verify that the method will return True when the version of nova_api # is supported. api_versions.APIVersion = mock.MagicMock() result = nova_util._check_nova_api_version(nova_util.nova, "2.56") self.assertTrue(result) # verify that the method will return False when the version of nova_api # is not supported. 
side_effect = nvexceptions.UnsupportedVersion() api_versions.discover_version = mock.MagicMock( side_effect=side_effect) result = nova_util._check_nova_api_version(nova_util.nova, "2.56") self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_wait_for_instance_status(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) # verify that the method will return True when the status of instance # is in the expected status. result = nova_util.wait_for_instance_status( instance, ('ACTIVE', 'ERROR'), 5, 10) self.assertTrue(result) # verify that the method will return False when the instance is None. result = nova_util.wait_for_instance_status( None, ('ACTIVE', 'ERROR'), 5, 10) self.assertFalse(result) # verify that the method will return False when the status of instance # is not in the expected status. self.fake_nova_find_list(nova_util, fake_find=instance, fake_list=None) result = nova_util.wait_for_instance_status( instance, ('ERROR',), 5, 10) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_confirm_resize(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) self.fake_nova_find_list(nova_util, fake_find=instance, fake_list=None) # verify that the method will return True when the status of instance # is in the expected status. result = nova_util.confirm_resize(instance, instance.status) self.assertTrue(result) # verify that the method will return False when the status of instance # is not in the expected status. result = nova_util.confirm_resize(instance, "fake_status") self.assertFalse(result) def test_get_compute_node_list( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() hypervisor1_id = utils.generate_uuid() hypervisor1_name = "fake_hypervisor_1" hypervisor1 = self.fake_hypervisor( hypervisor1_id, hypervisor1_name, hypervisor_type="QEMU") hypervisor2_id = utils.generate_uuid() hypervisor2_name = "fake_ironic" hypervisor2 = self.fake_hypervisor( hypervisor2_id, hypervisor2_name, hypervisor_type="ironic") nova_util.nova.hypervisors.list.return_value = [hypervisor1, hypervisor2] compute_nodes = nova_util.get_compute_node_list() # baremetal node should be removed self.assertEqual(1, len(compute_nodes)) self.assertEqual(hypervisor1_name, compute_nodes[0].hypervisor_hostname) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_placement_helper.py0000664000175000017500000002676100000000000025675 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
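# The nova helper tests above end by checking that get_compute_node_list()
# drops bare-metal (ironic) hypervisors. A minimal sketch of that filtering,
# assuming only that each node exposes a hypervisor_type attribute (the
# helper's real implementation may differ):

def filter_out_baremetal_nodes(hypervisors):
    # Keep QEMU and other VM hypervisors; discard ironic-managed nodes,
    # mirroring the behaviour asserted in test_get_compute_node_list.
    return [h for h in hypervisors if h.hypervisor_type.lower() != 'ironic']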
from http import HTTPStatus from unittest import mock from watcher.common import placement_helper from watcher.tests import base from watcher.tests import fakes as fake_requests from keystoneauth1 import loading as ka_loading from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import uuidutils CONF = cfg.CONF @mock.patch('keystoneauth1.session.Session.request') class TestPlacementHelper(base.TestCase): def setUp(self): super(TestPlacementHelper, self).setUp() _AUTH_CONF_GROUP = 'watcher_clients_auth' ka_loading.register_auth_conf_options(CONF, _AUTH_CONF_GROUP) ka_loading.register_session_conf_options(CONF, _AUTH_CONF_GROUP) self.client = placement_helper.PlacementHelper() self.fake_err_msg = { 'errors': [{ 'detail': 'The resource could not be found.', }] } def _add_default_kwargs(self, kwargs): kwargs['endpoint_filter'] = { 'service_type': 'placement', 'interface': CONF.placement_client.interface} kwargs['headers'] = {'accept': 'application/json'} kwargs['microversion'] = CONF.placement_client.api_version kwargs['raise_exc'] = False def _assert_keystone_called_once(self, kss_req, url, method, **kwargs): self._add_default_kwargs(kwargs) # request method has added param rate_semaphore since Stein cycle if 'rate_semaphore' in kss_req.call_args[1]: kwargs['rate_semaphore'] = mock.ANY kss_req.assert_called_once_with(url, method, **kwargs) def test_get(self, kss_req): kss_req.return_value = fake_requests.FakeResponse(HTTPStatus.OK) url = '/resource_providers' resp = self.client.get(url) self.assertEqual(HTTPStatus.OK, resp.status_code) self._assert_keystone_called_once(kss_req, url, 'GET') def test_get_resource_providers_OK(self, kss_req): rp_name = 'compute' rp_uuid = uuidutils.generate_uuid() parent_uuid = uuidutils.generate_uuid() fake_rp = [{'uuid': rp_uuid, 'name': rp_name, 'generation': 0, 'parent_provider_uuid': parent_uuid}] mock_json_data = { 'resource_providers': fake_rp } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_resource_providers(rp_name) expected_url = '/resource_providers?name=compute' self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_rp, result) def test_get_resource_providers_no_rp_OK(self, kss_req): rp_name = None rp_uuid = uuidutils.generate_uuid() parent_uuid = uuidutils.generate_uuid() fake_rp = [{'uuid': rp_uuid, 'name': 'compute', 'generation': 0, 'parent_provider_uuid': parent_uuid}] mock_json_data = { 'resource_providers': fake_rp } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_resource_providers(rp_name) expected_url = '/resource_providers' self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_rp, result) def test_get_resource_providers_fail(self, kss_req): rp_name = 'compute' kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.BAD_REQUEST, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_resource_providers(rp_name) self.assertIsNone(result) def test_get_inventories_OK(self, kss_req): rp_uuid = uuidutils.generate_uuid() fake_inventories = { "DISK_GB": { "allocation_ratio": 1.0, "max_unit": 35, "min_unit": 1, "reserved": 0, "step_size": 1, "total": 35 }, "MEMORY_MB": { "allocation_ratio": 1.5, "max_unit": 5825, "min_unit": 1, "reserved": 512, "step_size": 1, "total": 5825 }, "VCPU": { "allocation_ratio": 16.0, "max_unit": 4, "min_unit": 1, 
"reserved": 0, "step_size": 1, "total": 4 }, } mock_json_data = { 'inventories': fake_inventories, "resource_provider_generation": 7 } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_inventories(rp_uuid) expected_url = '/resource_providers/%s/inventories' % rp_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_inventories, result) def test_get_inventories_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.NOT_FOUND, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_inventories(rp_uuid) self.assertIsNone(result) def test_get_provider_traits_OK(self, kss_req): rp_uuid = uuidutils.generate_uuid() fake_traits = ["CUSTOM_HW_FPGA_CLASS1", "CUSTOM_HW_FPGA_CLASS3"] mock_json_data = { 'traits': fake_traits, "resource_provider_generation": 7 } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_provider_traits(rp_uuid) expected_url = '/resource_providers/%s/traits' % rp_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_traits, result) def test_get_provider_traits_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.NOT_FOUND, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_provider_traits(rp_uuid) self.assertIsNone(result) def test_get_allocations_for_consumer_OK(self, kss_req): c_uuid = uuidutils.generate_uuid() fake_allocations = { "92637880-2d79-43c6-afab-d860886c6391": { "generation": 2, "resources": { "DISK_GB": 5 } }, "ba8e1ef8-7fa3-41a4-9bb4-d7cb2019899b": { "generation": 8, "resources": { "MEMORY_MB": 512, "VCPU": 2 } } } mock_json_data = { 'allocations': fake_allocations, "consumer_generation": 1, "project_id": "7e67cbf7-7c38-4a32-b85b-0739c690991a", "user_id": "067f691e-725a-451a-83e2-5c3d13e1dffc" } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_allocations_for_consumer(c_uuid) expected_url = '/allocations/%s' % c_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_allocations, result) def test_get_allocations_for_consumer_fail(self, kss_req): c_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.NOT_FOUND, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_allocations_for_consumer(c_uuid) self.assertIsNone(result) def test_get_usages_for_resource_provider_OK(self, kss_req): rp_uuid = uuidutils.generate_uuid() fake_usages = { "DISK_GB": 1, "MEMORY_MB": 512, "VCPU": 1 } mock_json_data = { 'usages': fake_usages, "resource_provider_generation": 7 } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_usages_for_resource_provider(rp_uuid) expected_url = '/resource_providers/%s/usages' % rp_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_usages, result) def test_get_usages_for_resource_provider_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.NOT_FOUND, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = 
self.client.get_usages_for_resource_provider(rp_uuid) self.assertIsNone(result) def test_get_candidate_providers_OK(self, kss_req): resources = 'VCPU:4,DISK_GB:64,MEMORY_MB:2048' fake_provider_summaries = { "a99bad54-a275-4c4f-a8a3-ac00d57e5c64": { "resources": { "DISK_GB": { "used": 0, "capacity": 1900 }, }, "traits": ["MISC_SHARES_VIA_AGGREGATE"], "parent_provider_uuid": None, "root_provider_uuid": "a99bad54-a275-4c4f-a8a3-ac00d57e5c64" }, "35791f28-fb45-4717-9ea9-435b3ef7c3b3": { "resources": { "VCPU": { "used": 0, "capacity": 384 }, "MEMORY_MB": { "used": 0, "capacity": 196608 }, }, "traits": ["HW_CPU_X86_SSE2", "HW_CPU_X86_AVX2"], "parent_provider_uuid": None, "root_provider_uuid": "35791f28-fb45-4717-9ea9-435b3ef7c3b3" }, } mock_json_data = { 'provider_summaries': fake_provider_summaries, } kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.OK, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_candidate_providers(resources) expected_url = "/allocation_candidates?%s" % resources self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_provider_summaries, result) def test_get_candidate_providers_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( HTTPStatus.NOT_FOUND, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_candidate_providers(rp_uuid) self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_scheduling.py0000664000175000017500000000467600000000000024517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
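# The placement helper tests above drive every call through canned
# keystoneauth responses. A self-contained sketch of that fixture style;
# FakeResponse here is a stand-in for watcher.tests.fakes.FakeResponse,
# whose real signature may differ:

from http import HTTPStatus

from oslo_serialization import jsonutils


class FakeResponse(object):
    def __init__(self, status_code, content=b''):
        self.status_code = status_code
        self.content = content

    def json(self):
        return jsonutils.loads(self.content)


def make_fake_rp_response(rp_uuid, rp_name):
    # Shape the body like GET /resource_providers so a helper under test
    # can parse it exactly as it would a real placement reply.
    body = {'resource_providers': [{'uuid': rp_uuid, 'name': rp_name,
                                    'generation': 0,
                                    'parent_provider_uuid': None}]}
    return FakeResponse(HTTPStatus.OK,
                        content=jsonutils.dump_as_bytes(body))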
from unittest import mock import eventlet from apscheduler.schedulers import background from watcher.common import scheduling from watcher import eventlet as eventlet_helper from watcher.tests import base class TestSchedulerMonkeyPatching(base.BaseTestCase): def setUp(self): super().setUp() self.started = False self.test_scheduler = scheduling.BackgroundSchedulerService() self.addCleanup(self._cleanup_scheduler) def _cleanup_scheduler(self): if self.started: self.test_scheduler.shutdown() self.started = False def _start_scheduler(self): self.test_scheduler.start() self.started = True @mock.patch.object(scheduling.BackgroundSchedulerService, 'start') def test_scheduler_start(self, mock_start): self.test_scheduler.start() mock_start.assert_called_once_with() @mock.patch.object(scheduling.BackgroundSchedulerService, 'shutdown') def test_scheduler_stop(self, mock_shutdown): self._start_scheduler() self.test_scheduler.stop() mock_shutdown.assert_called_once_with() @mock.patch.object(scheduling.BackgroundSchedulerService, '_main_loop') def test_scheduler_main_loop(self, mock_main_loop): self._start_scheduler() mock_main_loop.assert_called_once_with() @mock.patch.object(background.BackgroundScheduler, '_main_loop') @mock.patch.object(eventlet, 'monkey_patch') def test_main_loop_is_monkey_patched( self, mock_monkey_patch, mock_main_loop): self.test_scheduler._main_loop() self.assertEqual( eventlet_helper.is_patched(), self.test_scheduler.should_patch) mock_monkey_patch.assert_called_once_with() mock_main_loop.assert_called_once_with() def test_scheduler_should_patch(self): self.assertEqual( eventlet_helper.is_patched(), self.test_scheduler.should_patch) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_service.py0000664000175000017500000000671700000000000024020 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
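# The scheduler tests above compare eventlet_helper.is_patched() with the
# service's should_patch flag. A reduced, self-contained version of the same
# kind of check using only stock eventlet (no watcher imports):

from eventlet import patcher


def stdlib_time_is_patched():
    # eventlet.patcher.is_monkey_patched() reports whether a named stdlib
    # module has been replaced by its green equivalent.
    return patcher.is_monkey_patched('time')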
from unittest import mock from oslo_config import cfg import oslo_messaging as om from watcher.common import service from watcher import objects from watcher.tests import base CONF = cfg.CONF class DummyEndpoint(object): def __init__(self, messaging): self._messaging = messaging class DummyManager(object): API_VERSION = '1.0' conductor_endpoints = [DummyEndpoint] notification_endpoints = [DummyEndpoint] def __init__(self): self.publisher_id = "pub_id" self.conductor_topic = "conductor_topic" self.notification_topics = [] self.api_version = self.API_VERSION self.service_name = None class TestServiceHeartbeat(base.TestCase): @mock.patch.object(objects.Service, 'list') @mock.patch.object(objects.Service, 'create') def test_send_beat_with_creating_service(self, mock_create, mock_list): CONF.set_default('host', 'fake-fqdn') mock_list.return_value = [] service.ServiceHeartbeat(service_name='watcher-service') mock_list.assert_called_once_with(mock.ANY, filters={'name': 'watcher-service', 'host': 'fake-fqdn'}) self.assertEqual(1, mock_create.call_count) @mock.patch.object(objects.Service, 'list') @mock.patch.object(objects.Service, 'save') def test_send_beat_without_creating_service(self, mock_save, mock_list): mock_list.return_value = [objects.Service(mock.Mock(), name='watcher-service', host='controller')] service.ServiceHeartbeat(service_name='watcher-service') self.assertEqual(1, mock_save.call_count) class TestService(base.TestCase): def setUp(self): super(TestService, self).setUp() @mock.patch.object(om.rpc.server, "RPCServer") def _test_start(self, m_handler): dummy_service = service.Service(DummyManager) dummy_service.start() self.assertEqual(1, m_handler.call_count) @mock.patch.object(om.rpc.server, "RPCServer") def _test_stop(self, m_handler): dummy_service = service.Service(DummyManager) dummy_service.stop() self.assertEqual(1, m_handler.call_count) def test_build_topic_handler(self): topic_name = "mytopic" dummy_service = service.Service(DummyManager) handler = dummy_service.build_topic_handler(topic_name) self.assertIsNotNone(handler) self.assertIsInstance(handler, om.rpc.server.RPCServer) self.assertEqual("mytopic", handler._target.topic) def test_init_service(self): dummy_service = service.Service(DummyManager) self.assertIsInstance( dummy_service.conductor_topic_handler, om.rpc.server.RPCServer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/common/test_utils.py0000664000175000017500000000324100000000000023515 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
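# The TestCommonUtils tests below exercise utils.async_compat_call, which
# runs a coroutine to completion from synchronous code with an optional
# timeout. A minimal sketch of such a wrapper under plain-asyncio
# assumptions (watcher's real helper may be implemented differently):

import asyncio


def run_coro_sync(coro_func, *args, timeout=None, **kwargs):
    # The timeout kwarg is consumed here; everything else is forwarded to
    # the coroutine function, matching how the tests below pass sleep= and
    # raise_exc= straight through.
    coro = coro_func(*args, **kwargs)
    if timeout is not None:
        coro = asyncio.wait_for(coro, timeout)
    return asyncio.run(coro)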
import asyncio import time from unittest import mock from watcher.common import utils from watcher.tests import base class TestCommonUtils(base.TestCase): async def test_coro(self, sleep=0, raise_exc=None): time.sleep(sleep) if raise_exc: raise raise_exc return mock.sentinel.ret_val def test_async_compat(self): ret_val = utils.async_compat_call(self.test_coro) self.assertEqual(mock.sentinel.ret_val, ret_val) def test_async_compat_exc(self): self.assertRaises( IOError, utils.async_compat_call, self.test_coro, raise_exc=IOError('fake error')) def test_async_compat_timeout(self): # Timeout not reached. ret_val = utils.async_compat_call(self.test_coro, timeout=10) self.assertEqual(mock.sentinel.ret_val, ret_val) # Timeout reached. self.assertRaises( asyncio.TimeoutError, utils.async_compat_call, self.test_coro, sleep=0.5, timeout=0.1) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/conf/0000775000175000017500000000000000000000000020401 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/conf/__init__.py0000664000175000017500000000000000000000000022500 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/conf/test_list_opts.py0000664000175000017500000001553700000000000024035 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_config import cfg from stevedore import extension from watcher.conf import opts from watcher.conf import plugins from watcher.tests import base from watcher.tests.decision_engine import fake_strategies class TestListOpts(base.TestCase): def setUp(self): super(TestListOpts, self).setUp() # These option groups will be registered using strings instead of # OptGroup objects; this should be avoided if possible.
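# The none_objects list below names exactly those legacy sections that are
# still registered as plain strings; every other section is expected to be
# declared with a cfg.OptGroup object, and _assert_name_or_group enforces
# that.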
self.none_objects = ['DEFAULT', 'watcher_clients_auth', 'watcher_strategies.strategy_1'] self.base_sections = [ 'DEFAULT', 'api', 'database', 'watcher_decision_engine', 'watcher_applier', 'watcher_datasources', 'watcher_planner', 'nova_client', 'glance_client', 'gnocchi_client', 'grafana_client', 'grafana_translators', 'cinder_client', 'monasca_client', 'ironic_client', 'keystone_client', 'neutron_client', 'watcher_clients_auth', 'collector', 'placement_client'] self.opt_sections = list(dict(opts.list_opts()).keys()) def _assert_name_or_group(self, actual_sections, expected_sections): for name_or_group, options in actual_sections: section_name = name_or_group if isinstance(name_or_group, cfg.OptGroup): section_name = name_or_group.name elif section_name in self.none_objects: pass else: # All option groups should be added to list_opts with an # OptGroup object; for some exceptions this is not possible, # but new groups should use OptGroup. raise Exception( "Invalid option group: {0} should be of type OptGroup not " "string.".format(section_name)) self.assertIn(section_name, expected_sections) self.assertTrue(len(options)) def test_run_list_opts(self): expected_sections = self.opt_sections result = opts.list_opts() self.assertIsNotNone(result) for section_name, options in result: self.assertIn(section_name, expected_sections) self.assertTrue(len(options)) def test_list_opts_no_opts(self): expected_sections = self.base_sections # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=fake_strategies.FakeDummy1Strategy2.get_name(), entry_point="%s:%s" % ( fake_strategies.FakeDummy1Strategy2.__module__, fake_strategies.FakeDummy1Strategy2.__name__), plugin=fake_strategies.FakeDummy1Strategy2, obj=None, )], namespace="watcher_strategies", ) def m_list_available(namespace): if namespace == "watcher_strategies": return fake_extmanager_call else: return extension.ExtensionManager.make_test_instance( extensions=[], namespace=namespace) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.side_effect = m_list_available result = opts.list_opts() self._assert_name_or_group(result, expected_sections) self.assertIsNotNone(result) def test_list_opts_with_opts(self): expected_sections = self.base_sections + [ 'watcher_strategies.strategy_1'] # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=fake_strategies.FakeDummy1Strategy1.get_name(), entry_point="%s:%s" % ( fake_strategies.FakeDummy1Strategy1.__module__, fake_strategies.FakeDummy1Strategy1.__name__), plugin=fake_strategies.FakeDummy1Strategy1, obj=None, )], namespace="watcher_strategies", ) def m_list_available(namespace): if namespace == "watcher_strategies": return fake_extmanager_call else: return extension.ExtensionManager.make_test_instance( extensions=[], namespace=namespace) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.side_effect = m_list_available result = opts.list_opts() self.assertIsNotNone(result) self._assert_name_or_group(result, expected_sections) result_map = dict(result) strategy_opts = result_map['watcher_strategies.strategy_1'] self.assertEqual(['test_opt'], [opt.name for opt in strategy_opts]) class TestPlugins(base.TestCase): def test_show_plugins(self): # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance(
extensions=[extension.Extension( name=fake_strategies.FakeDummy1Strategy1.get_name(), entry_point="%s:%s" % ( fake_strategies.FakeDummy1Strategy1.__module__, fake_strategies.FakeDummy1Strategy1.__name__), plugin=fake_strategies.FakeDummy1Strategy1, obj=None, )], namespace="watcher_strategies", ) def m_list_available(namespace): if namespace == "watcher_strategies": return fake_extmanager_call else: return extension.ExtensionManager.make_test_instance( extensions=[], namespace=namespace) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: with mock.patch.object( plugins, "_show_plugins_ascii_table" ) as m_show: m_ext_manager.side_effect = m_list_available plugins.show_plugins() m_show.assert_called_once_with( [('watcher_strategies.strategy_1', 'strategy_1', 'watcher.tests.decision_engine.' 'fake_strategies.FakeDummy1Strategy1')]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/conf_fixture.py0000664000175000017500000000340200000000000022520 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_config import fixture as conf_fixture from watcher.common import config class ConfFixture(conf_fixture.Config): """Fixture to manage conf settings.""" def setUp(self): super(ConfFixture, self).setUp() self.conf.set_default('connection', "sqlite://", group='database') self.conf.set_default('sqlite_synchronous', False, group='database') config.parse_args([], default_config_files=[]) class ConfReloadFixture(ConfFixture): """Fixture to manage reloads of conf settings.""" def __init__(self, conf=cfg.CONF): self.conf = conf self._original_parse_cli_opts = self.conf._parse_cli_opts def _fake_parser(self, *args, **kw): return cfg.ConfigOpts._parse_cli_opts(self.conf, []) def _restore_parser(self): self.conf._parse_cli_opts = self._original_parse_cli_opts def setUp(self): super(ConfReloadFixture, self).setUp() self.conf._parse_cli_opts = self._fake_parser self.addCleanup(self._restore_parser) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/config.py0000664000175000017500000000205400000000000021274 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
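# conf_fixture.py above captures the oslo.config fixture idiom: take over a
# ConfigOpts object for one test and restore it automatically at cleanup.
# A minimal usage sketch with a throwaway option (my_opt/my_group are
# illustrative names, not watcher options):

from oslo_config import cfg
from oslo_config import fixture as config_fixture


def override_option_for_test(test_case):
    # useFixture() wires the fixture's cleanup into the test case, so the
    # override below is reverted automatically when the test ends.
    conf = test_case.useFixture(config_fixture.Config(cfg.CONF)).conf
    conf.register_opt(cfg.StrOpt('my_opt', default='off'), group='my_group')
    conf.set_override('my_opt', 'on', group='my_group')
    return conf.my_group.my_opt  # -> 'on'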
from watcher.api import hooks # Server Specific Configurations server = { 'port': '9322', 'host': '0.0.0.0' } # Pecan Application Configurations app = { 'root': 'watcher.api.controllers.root.RootController', 'modules': ['watcher.api'], 'hooks': [ hooks.ContextHook(), ], 'acl_public_routes': [ '/' ], } # Custom Configurations must be in Python dictionary format:: # # foo = {'bar':'baz'} # # All configurations are accessible at:: # pecan.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/db/0000775000175000017500000000000000000000000020041 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/__init__.py0000664000175000017500000000000000000000000022140 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/base.py0000664000175000017500000000517000000000000021330 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher DB test base class.""" import fixtures from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from watcher.db import api as dbapi from watcher.db.sqlalchemy import migration from watcher.db.sqlalchemy import models from watcher.tests import base from watcher.tests.db import utils CONF = cfg.CONF CONF.import_opt('enable_authentication', 'watcher.api.acl') _DB_CACHE = None class Database(fixtures.Fixture): def __init__(self, engine, db_migrate, sql_connection): self.sql_connection = sql_connection self.engine = engine self.engine.dispose() with self.engine.connect() as conn: self.setup_sqlite(db_migrate) self.post_migrations() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() def setup_sqlite(self, db_migrate): if db_migrate.version(): return models.Base.metadata.create_all(self.engine) db_migrate.stamp('head') def setUp(self): super(Database, self).setUp() with self.engine.connect() as conn: conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) def post_migrations(self): """Any addition steps that are needed outside of the migrations.""" class DbTestCase(base.TestCase): def get_next_id(self): return next(self._id_gen) def setUp(self): cfg.CONF.set_override("enable_authentication", False) # To use in-memory SQLite DB cfg.CONF.set_override("connection", "sqlite://", group="database") super(DbTestCase, self).setUp() self.dbapi = dbapi.get_instance() global _DB_CACHE if not _DB_CACHE: engine = enginefacade.writer.get_engine() _DB_CACHE = Database(engine, migration, sql_connection=CONF.database.connection) engine.dispose() self.useFixture(_DB_CACHE) self._id_gen = utils.id_generator() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/watcher/tests/db/test_action.py0000664000175000017500000003562700000000000022744 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Action via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbActionFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbActionFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) self.audit = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None) self.action_plan = utils.create_test_action_plan( audit_id=self.audit.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.action1 = utils.create_test_action( action_plan_id=self.action_plan.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.action2 = utils.create_test_action( action_plan_id=self.action_plan.id, id=2, uuid=None) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.action3 = utils.create_test_action( action_plan_id=self.action_plan.id, id=3, uuid=None) def _soft_delete_actions(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action(self.action1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_action(self.action2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_action(self.action3.uuid) def _update_actions(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_action( self.action1.uuid, values={"state": objects.action_plan.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_action( self.action2.uuid, values={"state": objects.action_plan.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_action( self.action3.uuid, values={"state": objects.action_plan.State.SUCCEEDED}) def test_get_action_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action(self.action1.uuid) res = self.dbapi.get_action_list( self.context, filters={'deleted': True}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action(self.action1.uuid) res = self.dbapi.get_action_list( self.context, filters={'deleted': False}) self.assertEqual([self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_eq(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, 
filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_lt(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_lte(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_gt(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_gte(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action1['id'], self.action2['id']], [r.id for r in res]) # created_at # def test_get_action_filter_created_at_eq(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_created_at_lt(self): with freezegun.freeze_time(self.FAKE_TODAY): res = self.dbapi.get_action_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_created_at_lte(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_created_at_gt(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_created_at_gte(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action1['id'], self.action2['id']], [r.id for r in res]) # updated_at # def test_get_action_filter_updated_at_eq(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_updated_at_lt(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_updated_at_lte(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_updated_at_gt(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_updated_at_gte(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action1['id'], self.action2['id']], [r.id for r in res]) class DbActionTestCase(base.DbTestCase): def test_get_action_list(self): uuids = [] for _ in range(1, 4): 
action = utils.create_test_action(uuid=w_utils.generate_uuid()) uuids.append(str(action['uuid'])) actions = self.dbapi.get_action_list(self.context) action_uuids = [a.uuid for a in actions] self.assertEqual(3, len(action_uuids)) self.assertEqual(sorted(uuids), sorted(action_uuids)) for action in actions: self.assertIsNone(action.action_plan) def test_get_action_list_eager(self): _action_plan = utils.get_test_action_plan() action_plan = self.dbapi.create_action_plan(_action_plan) uuids = [] for i in range(1, 4): action = utils.create_test_action( id=i, uuid=w_utils.generate_uuid(), action_plan_id=action_plan.id) uuids.append(str(action['uuid'])) actions = self.dbapi.get_action_list(self.context, eager=True) action_map = {a.uuid: a for a in actions} self.assertEqual(sorted(uuids), sorted(action_map.keys())) eager_action = action_map[action.uuid] self.assertEqual( action_plan.as_dict(), eager_action.action_plan.as_dict()) def test_get_action_list_with_filters(self): audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) action_plan = utils.create_test_action_plan( id=1, uuid=w_utils.generate_uuid(), audit_id=audit.id, parents=None, state=objects.action_plan.State.RECOMMENDED) action1 = utils.create_test_action( id=1, action_plan_id=action_plan['id'], description='description action 1', uuid=w_utils.generate_uuid(), parents=None, state=objects.action_plan.State.PENDING) action2 = utils.create_test_action( id=2, action_plan_id=2, description='description action 2', uuid=w_utils.generate_uuid(), parents=[action1['uuid']], state=objects.action_plan.State.PENDING) action3 = utils.create_test_action( id=3, action_plan_id=action_plan['id'], description='description action 3', uuid=w_utils.generate_uuid(), parents=[action2['uuid']], state=objects.action_plan.State.ONGOING) action4 = utils.create_test_action( id=4, action_plan_id=action_plan['id'], description='description action 4', uuid=w_utils.generate_uuid(), parents=None, state=objects.action_plan.State.ONGOING) self.dbapi.soft_delete_action(action4['uuid']) res = self.dbapi.get_action_list( self.context, filters={'state': objects.action_plan.State.ONGOING}) self.assertEqual([action3['id']], [r.id for r in res]) res = self.dbapi.get_action_list(self.context, filters={'state': 'bad-state'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_action_list( self.context, filters={'action_plan_id': 2}) self.assertEqual([action2['id']], [r.id for r in res]) res = self.dbapi.get_action_list( self.context, filters={'action_plan_uuid': action_plan['uuid']}) self.assertEqual( sorted([action1['id'], action3['id']]), sorted([r.id for r in res])) temp_context = self.context temp_context.show_deleted = True res = self.dbapi.get_action_list( temp_context, filters={'action_plan_uuid': action_plan['uuid']}) self.assertEqual( sorted([action1['id'], action3['id'], action4['id']]), sorted([r.id for r in res])) res = self.dbapi.get_action_list( self.context, filters={'audit_uuid': audit.uuid}) for action in res: self.assertEqual(action_plan['id'], action.action_plan_id) def test_get_action_list_with_filter_by_uuid(self): action = utils.create_test_action() res = self.dbapi.get_action_list( self.context, filters={'uuid': action["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(action['uuid'], res[0].uuid) def test_get_action_by_id(self): action = utils.create_test_action() action = self.dbapi.get_action_by_id(self.context, action['id']) self.assertEqual(action['uuid'], action.uuid) def test_get_action_by_uuid(self): action = 
utils.create_test_action() action = self.dbapi.get_action_by_uuid(self.context, action['uuid']) self.assertEqual(action['id'], action.id) def test_get_action_that_does_not_exist(self): self.assertRaises(exception.ActionNotFound, self.dbapi.get_action_by_id, self.context, 1234) def test_update_action(self): action = utils.create_test_action() res = self.dbapi.update_action( action['id'], {'state': objects.action_plan.State.CANCELLED}) self.assertEqual(objects.action_plan.State.CANCELLED, res.state) def test_update_action_that_does_not_exist(self): self.assertRaises(exception.ActionNotFound, self.dbapi.update_action, 1234, {'state': ''}) def test_update_action_uuid(self): action = utils.create_test_action() self.assertRaises(exception.Invalid, self.dbapi.update_action, action['id'], {'uuid': 'hello'}) def test_destroy_action(self): action = utils.create_test_action() self.dbapi.destroy_action(action['id']) self.assertRaises(exception.ActionNotFound, self.dbapi.get_action_by_id, self.context, action['id']) def test_destroy_action_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_action(uuid=uuid) self.assertIsNotNone(self.dbapi.get_action_by_uuid(self.context, uuid)) self.dbapi.destroy_action(uuid) self.assertRaises(exception.ActionNotFound, self.dbapi.get_action_by_uuid, self.context, uuid) def test_destroy_action_that_does_not_exist(self): self.assertRaises(exception.ActionNotFound, self.dbapi.destroy_action, 1234) def test_create_action_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_action(id=1, uuid=uuid) self.assertRaises(exception.ActionAlreadyExists, utils.create_test_action, id=2, uuid=uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_action_description.py0000664000175000017500000002601200000000000025333 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
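# Editor's note -- an illustrative aside, not part of the upstream file:
# the *Filters test classes in this archive pin object timestamps with
# freezegun so that created_at/updated_at/deleted_at values are
# deterministic. The mechanism in isolation, assuming only the stdlib and
# freezegun:
#
#     import datetime
#     import freezegun
#
#     with freezegun.freeze_time('2017-02-24T09:52:05.219414'):
#         now = datetime.datetime.utcnow()
#     assert now == datetime.datetime(2017, 2, 24, 9, 52, 5, 219414)
#
# Any row created inside such a ``freeze_time`` block is stamped with that
# exact time, which is what the ``__eq``/``__lt``/``__gte`` filter
# assertions below rely on.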
"""Tests for manipulating ActionDescription via the DB API""" import freezegun from watcher.common import exception from watcher.tests.db import base from watcher.tests.db import utils class TestDbActionDescriptionFilters(base.DbTestCase): FAKE_OLDER_DATE = '2015-01-01T09:52:05.219414' FAKE_OLD_DATE = '2016-01-01T09:52:05.219414' FAKE_TODAY = '2017-02-24T09:52:05.219414' def setUp(self): super(TestDbActionDescriptionFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): action_desc1_type = "nop" action_desc2_type = "sleep" action_desc3_type = "resize" with freezegun.freeze_time(self.FAKE_TODAY): self.action_desc1 = utils.create_test_action_desc( id=1, action_type=action_desc1_type, description="description") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.action_desc2 = utils.create_test_action_desc( id=2, action_type=action_desc2_type, description="description") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.action_desc3 = utils.create_test_action_desc( id=3, action_type=action_desc3_type, description="description") def _soft_delete_action_descs(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_description(self.action_desc1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_action_description(self.action_desc2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_action_description(self.action_desc3.id) def _update_action_descs(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_action_description( self.action_desc1.id, values={"description": "nop description"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_action_description( self.action_desc2.id, values={"description": "sleep description"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_action_description( self.action_desc3.id, values={"description": "resize description"}) def test_get_action_desc_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_description(self.action_desc1.id) res = self.dbapi.get_action_description_list( self.context, filters={'deleted': True}) self.assertEqual([self.action_desc1['action_type']], [r.action_type for r in res]) def test_get_action_desc_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_description(self.action_desc1.id) res = self.dbapi.get_action_description_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.action_desc2['action_type'], self.action_desc3['action_type']]), set([r.action_type for r in res])) def test_get_action_desc_list_filter_deleted_at_eq(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_deleted_at_lt(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_deleted_at_lte(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def 
test_get_action_desc_list_filter_deleted_at_gt(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_deleted_at_gte(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc1['id'], self.action_desc2['id']]), set([r.id for r in res])) # created_at # def test_get_action_desc_list_filter_created_at_eq(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_created_at_lt(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_created_at_lte(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_created_at_gt(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_created_at_gte(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc1['id'], self.action_desc2['id']]), set([r.id for r in res])) # updated_at # def test_get_action_desc_list_filter_updated_at_eq(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_updated_at_lt(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_updated_at_lte(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_updated_at_gt(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_updated_at_gte(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc1['id'], self.action_desc2['id']]), set([r.id for r in res])) class DbActionDescriptionTestCase(base.DbTestCase): def test_get_action_desc_list(self): ids = [] for i in range(1, 4): action_desc = utils.create_test_action_desc( id=i, action_type="action_%s" % i, description="description_{0}".format(i)) ids.append(action_desc['id']) action_descs = 
self.dbapi.get_action_description_list(self.context) action_desc_ids = [s.id for s in action_descs] self.assertEqual(sorted(ids), sorted(action_desc_ids)) def test_get_action_desc_list_with_filters(self): action_desc1 = utils.create_test_action_desc( id=1, action_type="action_1", description="description_1", ) action_desc2 = utils.create_test_action_desc( id=2, action_type="action_2", description="description_2", ) res = self.dbapi.get_action_description_list( self.context, filters={'action_type': 'action_1'}) self.assertEqual([action_desc1['id']], [r.id for r in res]) res = self.dbapi.get_action_description_list( self.context, filters={'action_type': 'action_3'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_action_description_list( self.context, filters={'action_type': 'action_2'}) self.assertEqual([action_desc2['id']], [r.id for r in res]) def test_get_action_desc_by_type(self): created_action_desc = utils.create_test_action_desc() action_desc = self.dbapi.get_action_description_by_type( self.context, created_action_desc['action_type']) self.assertEqual(action_desc.action_type, created_action_desc['action_type']) def test_get_action_desc_that_does_not_exist(self): self.assertRaises(exception.ActionDescriptionNotFound, self.dbapi.get_action_description_by_id, self.context, 404) def test_update_action_desc(self): action_desc = utils.create_test_action_desc() res = self.dbapi.update_action_description( action_desc['id'], {'description': 'description_test'}) self.assertEqual('description_test', res.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_action_plan.py0000664000175000017500000003640000000000000023744 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
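# Editor's note -- an illustrative aside, not upstream code: filter keys
# such as ``deleted_at__eq``, ``created_at__lt`` or ``updated_at__gte``
# follow a ``<field>__<operator>`` naming convention. A minimal decoder
# demonstrating that convention (a hypothetical helper, not the real one
# in watcher's DB API layer) could look like:
#
#     import operator
#
#     _OPS = {'eq': operator.eq, 'lt': operator.lt, 'lte': operator.le,
#             'gt': operator.gt, 'gte': operator.ge}
#
#     def split_filter_key(key):
#         """Map 'deleted_at__lte' to ('deleted_at', operator.le)."""
#         field, sep, op_name = key.partition('__')
#         return field, _OPS[op_name] if sep else operator.eq
#
# A bare key such as ``state`` falls through to plain equality.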
"""Tests for manipulating ActionPlan via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.objects import action_plan as ap_objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbActionPlanFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbActionPlanFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) self.audit = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.action_plan1 = utils.create_test_action_plan( audit_id=self.audit.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.action_plan2 = utils.create_test_action_plan( audit_id=self.audit.id, id=2, uuid=None) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.action_plan3 = utils.create_test_action_plan( audit_id=self.audit.id, id=3, uuid=None) def _soft_delete_action_plans(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_action_plan(self.action_plan2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_action_plan(self.action_plan3.uuid) def _update_action_plans(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_action_plan( self.action_plan1.uuid, values={"state": ap_objects.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_action_plan( self.action_plan2.uuid, values={"state": ap_objects.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_action_plan( self.action_plan3.uuid, values={"state": ap_objects.State.SUCCEEDED}) def test_get_action_plan_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) res = self.dbapi.get_action_plan_list( self.context, filters={'deleted': True}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) res = self.dbapi.get_action_plan_list( self.context, filters={'deleted': False}) self.assertEqual([self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_eq(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_lt(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_lte(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) 
def test_get_action_plan_list_filter_deleted_at_gt(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_gte(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan1['id'], self.action_plan2['id']], [r.id for r in res]) # created_at # def test_get_action_plan_list_filter_created_at_eq(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_lt(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_lte(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_gt(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_gte(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan1['id'], self.action_plan2['id']], [r.id for r in res]) # updated_at # def test_get_action_plan_list_filter_updated_at_eq(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_lt(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_lte(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_gt(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_gte(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan1['id'], self.action_plan2['id']], [r.id for r in res]) class DbActionPlanTestCase(base.DbTestCase): def test_get_action_plan_list(self): uuids = [] for _ in range(1, 4): action_plan = utils.create_test_action_plan( uuid=w_utils.generate_uuid()) uuids.append(str(action_plan['uuid'])) action_plans = self.dbapi.get_action_plan_list(self.context) action_plan_uuids = [ap.uuid for ap in action_plans] self.assertEqual(sorted(uuids), sorted(action_plan_uuids)) for action_plan in action_plans: 
self.assertIsNone(action_plan.audit) self.assertIsNone(action_plan.strategy) def test_get_action_plan_list_eager(self): _strategy = utils.get_test_strategy() strategy = self.dbapi.create_strategy(_strategy) _audit = utils.get_test_audit() audit = self.dbapi.create_audit(_audit) uuids = [] for _ in range(1, 4): action_plan = utils.create_test_action_plan( uuid=w_utils.generate_uuid()) uuids.append(str(action_plan['uuid'])) action_plans = self.dbapi.get_action_plan_list( self.context, eager=True) action_plan_map = {a.uuid: a for a in action_plans} self.assertEqual(sorted(uuids), sorted(action_plan_map.keys())) eager_action_plan = action_plan_map[action_plan.uuid] self.assertEqual( strategy.as_dict(), eager_action_plan.strategy.as_dict()) self.assertEqual(audit.as_dict(), eager_action_plan.audit.as_dict()) def test_get_action_plan_list_with_filters(self): audit = utils.create_test_audit( id=2, audit_type='ONESHOT', uuid=w_utils.generate_uuid(), state=ap_objects.State.ONGOING) action_plan1 = utils.create_test_action_plan( id=1, uuid=w_utils.generate_uuid(), audit_id=audit['id'], state=ap_objects.State.RECOMMENDED) action_plan2 = utils.create_test_action_plan( id=2, uuid=w_utils.generate_uuid(), audit_id=audit['id'], state=ap_objects.State.ONGOING) action_plan3 = utils.create_test_action_plan( id=3, uuid=w_utils.generate_uuid(), audit_id=audit['id'], state=ap_objects.State.RECOMMENDED) # check on bug 1761956 self.dbapi.soft_delete_action_plan(action_plan3['uuid']) res = self.dbapi.get_action_plan_list( self.context, filters={'state': ap_objects.State.RECOMMENDED}) self.assertEqual([action_plan1['id']], [r.id for r in res]) res = self.dbapi.get_action_plan_list( self.context, filters={'state': ap_objects.State.ONGOING}) self.assertEqual([action_plan2['id']], [r.id for r in res]) res = self.dbapi.get_action_plan_list( self.context, filters={'audit_uuid': audit['uuid']}) self.assertEqual( sorted([action_plan1['id'], action_plan2['id']]), sorted([r.id for r in res])) for r in res: self.assertEqual(audit['id'], r.audit_id) self.dbapi.soft_delete_action_plan(action_plan1['uuid']) res = self.dbapi.get_action_plan_list( self.context, filters={'audit_uuid': audit['uuid']}) self.assertEqual([action_plan2['id']], [r.id for r in res]) self.assertNotEqual([action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_with_filter_by_uuid(self): action_plan = utils.create_test_action_plan() res = self.dbapi.get_action_plan_list( self.context, filters={'uuid': action_plan["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(action_plan['uuid'], res[0].uuid) def test_get_action_plan_by_id(self): action_plan = utils.create_test_action_plan() action_plan = self.dbapi.get_action_plan_by_id( self.context, action_plan['id']) self.assertEqual(action_plan['uuid'], action_plan.uuid) def test_get_action_plan_by_uuid(self): action_plan = utils.create_test_action_plan() action_plan = self.dbapi.get_action_plan_by_uuid( self.context, action_plan['uuid']) self.assertEqual(action_plan['id'], action_plan.id) def test_get_action_plan_that_does_not_exist(self): self.assertRaises(exception.ActionPlanNotFound, self.dbapi.get_action_plan_by_id, self.context, 1234) def test_update_action_plan(self): action_plan = utils.create_test_action_plan() res = self.dbapi.update_action_plan( action_plan['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_action_plan_that_does_not_exist(self): self.assertRaises(exception.ActionPlanNotFound, self.dbapi.update_action_plan, 1234, {'name': 
''}) def test_update_action_plan_uuid(self): action_plan = utils.create_test_action_plan() self.assertRaises(exception.Invalid, self.dbapi.update_action_plan, action_plan['id'], {'uuid': 'hello'}) def test_destroy_action_plan(self): action_plan = utils.create_test_action_plan() self.dbapi.destroy_action_plan(action_plan['id']) self.assertRaises(exception.ActionPlanNotFound, self.dbapi.get_action_plan_by_id, self.context, action_plan['id']) def test_destroy_action_plan_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_action_plan(uuid=uuid) self.assertIsNotNone(self.dbapi.get_action_plan_by_uuid( self.context, uuid)) self.dbapi.destroy_action_plan(uuid) self.assertRaises(exception.ActionPlanNotFound, self.dbapi.get_action_plan_by_uuid, self.context, uuid) def test_destroy_action_plan_that_does_not_exist(self): self.assertRaises(exception.ActionPlanNotFound, self.dbapi.destroy_action_plan, 1234) def test_destroy_action_plan_that_referenced_by_actions(self): action_plan = utils.create_test_action_plan() action = utils.create_test_action(action_plan_id=action_plan['id']) self.assertEqual(action_plan['id'], action.action_plan_id) self.assertRaises(exception.ActionPlanReferenced, self.dbapi.destroy_action_plan, action_plan['id']) def test_create_action_plan_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_action_plan(id=1, uuid=uuid) self.assertRaises(exception.ActionPlanAlreadyExists, utils.create_test_action_plan, id=2, uuid=uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_audit.py0000664000175000017500000004055500000000000022571 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
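# Editor's note -- an illustrative aside, not upstream code: besides the
# comparison suffixes, the audit tests below also exercise set-membership
# filters (``state__in`` / ``state__notin``). Their intended semantics are
# those of Python's ``in`` operator applied to a field, e.g.:
#
#     def state_matches(state, filters):
#         if 'state__in' in filters:
#             return state in filters['state__in']
#         if 'state__notin' in filters:
#             return state not in filters['state__notin']
#         return True
#
# (a hypothetical predicate for illustration only; the DB layer presumably
# renders these as SQL ``IN`` / ``NOT IN`` clauses).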
"""Tests for manipulating Audit via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbAuditFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbAuditFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" def gen_name(): return "Audit %s" % w_utils.generate_uuid() self.audit1_name = gen_name() self.audit2_name = gen_name() self.audit3_name = gen_name() self.audit4_name = gen_name() self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.audit1 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None, name=self.audit1_name) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.audit2 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=2, uuid=None, name=self.audit2_name, state=objects.audit.State.FAILED) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.audit3 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=3, uuid=None, name=self.audit3_name, state=objects.audit.State.CANCELLED) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.audit4 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=4, uuid=None, name=self.audit4_name, state=objects.audit.State.SUSPENDED) def _soft_delete_audits(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit(self.audit1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_audit(self.audit2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_audit(self.audit3.uuid) def _update_audits(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_audit( self.audit1.uuid, values={"state": objects.audit.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_audit( self.audit2.uuid, values={"state": objects.audit.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_audit( self.audit3.uuid, values={"state": objects.audit.State.SUCCEEDED}) def test_get_audit_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit(self.audit1.uuid) res = self.dbapi.get_audit_list( self.context, filters={'deleted': True}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit(self.audit1.uuid) res = self.dbapi.get_audit_list( self.context, filters={'deleted': False}) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_eq(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_lt(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_lte(self): 
self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_gt(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_gte(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit1['id'], self.audit2['id']], [r.id for r in res]) # created_at # def test_get_audit_list_filter_created_at_eq(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_lt(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_lte(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_gt(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_gte(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit1['id'], self.audit2['id']], [r.id for r in res]) # updated_at # def test_get_audit_list_filter_updated_at_eq(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_lt(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_lte(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_gt(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_gte(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit1['id'], self.audit2['id']], [r.id for r in res]) def test_get_audit_list_filter_state_in(self): res = self.dbapi.get_audit_list( self.context, filters={ 'state__in': objects.audit.AuditStateTransitionManager.INACTIVE_STATES }) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_state_notin(self): res = self.dbapi.get_audit_list( self.context, filters={ 'state__notin': objects.audit.AuditStateTransitionManager.INACTIVE_STATES }) self.assertEqual( [self.audit1['id']], [r.id for r 
in res]) class DbAuditTestCase(base.DbTestCase): def test_get_audit_list(self): uuids = [] for id_ in range(1, 4): audit = utils.create_test_audit(uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(id_)) uuids.append(str(audit['uuid'])) audits = self.dbapi.get_audit_list(self.context) audit_uuids = [a.uuid for a in audits] self.assertEqual(sorted(uuids), sorted(audit_uuids)) for audit in audits: self.assertIsNone(audit.goal) self.assertIsNone(audit.strategy) def test_get_audit_list_eager(self): _goal = utils.get_test_goal() goal = self.dbapi.create_goal(_goal) _strategy = utils.get_test_strategy() strategy = self.dbapi.create_strategy(_strategy) uuids = [] for i in range(1, 4): audit = utils.create_test_audit( id=i, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(i), goal_id=goal.id, strategy_id=strategy.id) uuids.append(str(audit['uuid'])) audits = self.dbapi.get_audit_list(self.context, eager=True) audit_map = {a.uuid: a for a in audits} self.assertEqual(sorted(uuids), sorted(audit_map.keys())) eager_audit = audit_map[audit.uuid] self.assertEqual(goal.as_dict(), eager_audit.goal.as_dict()) self.assertEqual(strategy.as_dict(), eager_audit.strategy.as_dict()) def test_get_audit_list_with_filters(self): goal = utils.create_test_goal(name='DUMMY') audit1 = utils.create_test_audit( id=1, audit_type=objects.audit.AuditType.ONESHOT.value, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(1), state=objects.audit.State.ONGOING, goal_id=goal['id']) audit2 = utils.create_test_audit( id=2, audit_type=objects.audit.AuditType.CONTINUOUS.value, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(2), state=objects.audit.State.PENDING, goal_id=goal['id']) audit3 = utils.create_test_audit( id=3, audit_type=objects.audit.AuditType.CONTINUOUS.value, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(3), state=objects.audit.State.ONGOING, goal_id=goal['id']) self.dbapi.soft_delete_audit(audit3['uuid']) res = self.dbapi.get_audit_list( self.context, filters={'audit_type': objects.audit.AuditType.ONESHOT.value}) self.assertEqual([audit1['id']], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'audit_type': 'bad-type'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'state': objects.audit.State.ONGOING}) self.assertEqual([audit1['id']], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'state': objects.audit.State.PENDING}) self.assertEqual([audit2['id']], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'goal_name': 'DUMMY'}) self.assertEqual(sorted([audit1['id'], audit2['id']]), sorted([r.id for r in res])) temp_context = self.context temp_context.show_deleted = True res = self.dbapi.get_audit_list( temp_context, filters={'goal_name': 'DUMMY'}) self.assertEqual(sorted([audit1['id'], audit2['id'], audit3['id']]), sorted([r.id for r in res])) def test_get_audit_list_with_filter_by_uuid(self): audit = utils.create_test_audit() res = self.dbapi.get_audit_list( self.context, filters={'uuid': audit["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(audit['uuid'], res[0].uuid) def test_get_audit_by_id(self): audit = utils.create_test_audit() audit = self.dbapi.get_audit_by_id(self.context, audit['id']) self.assertEqual(audit['uuid'], audit.uuid) def test_get_audit_by_uuid(self): audit = utils.create_test_audit() audit = self.dbapi.get_audit_by_uuid(self.context, audit['uuid']) self.assertEqual(audit['id'], audit.id) def 
test_get_audit_that_does_not_exist(self): self.assertRaises(exception.AuditNotFound, self.dbapi.get_audit_by_id, self.context, 1234) def test_update_audit(self): audit = utils.create_test_audit() res = self.dbapi.update_audit(audit['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_audit_that_does_not_exist(self): self.assertRaises(exception.AuditNotFound, self.dbapi.update_audit, 1234, {'name': ''}) def test_update_audit_uuid(self): audit = utils.create_test_audit() self.assertRaises(exception.Invalid, self.dbapi.update_audit, audit['id'], {'uuid': 'hello'}) def test_destroy_audit(self): audit = utils.create_test_audit() self.dbapi.destroy_audit(audit['id']) self.assertRaises(exception.AuditNotFound, self.dbapi.get_audit_by_id, self.context, audit['id']) def test_destroy_audit_by_uuid(self): audit = utils.create_test_audit() self.assertIsNotNone(self.dbapi.get_audit_by_uuid(self.context, audit['uuid'])) self.dbapi.destroy_audit(audit['uuid']) self.assertRaises(exception.AuditNotFound, self.dbapi.get_audit_by_uuid, self.context, audit['uuid']) def test_destroy_audit_that_does_not_exist(self): self.assertRaises(exception.AuditNotFound, self.dbapi.destroy_audit, 1234) def test_destroy_audit_that_referenced_by_action_plans(self): audit = utils.create_test_audit() action_plan = utils.create_test_action_plan(audit_id=audit['id']) self.assertEqual(audit['id'], action_plan.audit_id) self.assertRaises(exception.AuditReferenced, self.dbapi.destroy_audit, audit['id']) def test_create_audit_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_audit(id=1, uuid=uuid) self.assertRaises(exception.AuditAlreadyExists, utils.create_test_audit, id=2, uuid=uuid) def test_create_same_name_audit(self): audit = utils.create_test_audit( uuid=w_utils.generate_uuid(), name='my_audit') self.assertEqual(audit['uuid'], audit.uuid) self.assertRaises( exception.AuditAlreadyExists, utils.create_test_audit, uuid=w_utils.generate_uuid(), name='my_audit') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_audit_template.py0000664000175000017500000004021500000000000024455 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
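# Editor's note -- an illustrative aside, not upstream code: the suites in
# this archive consistently distinguish two deletion paths, both exercised
# below. ``soft_delete_*`` marks a row as deleted (stamping ``deleted_at``)
# so it disappears from normal listings yet remains visible to a request
# context with ``show_deleted = True``; ``destroy_*`` removes the row
# outright, after which lookups raise the matching ``*NotFound`` exception:
#
#     self.dbapi.soft_delete_audit_template(uuid)  # hidden, recoverable
#     self.dbapi.destroy_audit_template(uuid)      # gone; a subsequent
#     # get_audit_template_by_uuid() raises AuditTemplateNotFound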
"""Tests for manipulating AuditTemplate via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbAuditTemplateFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbAuditTemplateFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): def gen_name(): return "Audit Template %s" % w_utils.generate_uuid() self.audit_template1_name = gen_name() self.audit_template2_name = gen_name() self.audit_template3_name = gen_name() with freezegun.freeze_time(self.FAKE_TODAY): self.audit_template1 = utils.create_test_audit_template( name=self.audit_template1_name, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.audit_template2 = utils.create_test_audit_template( name=self.audit_template2_name, id=2, uuid=None) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.audit_template3 = utils.create_test_audit_template( name=self.audit_template3_name, id=3, uuid=None) def _soft_delete_audit_templates(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_audit_template(self.audit_template2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_audit_template(self.audit_template3.uuid) def _update_audit_templates(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_audit_template( self.audit_template1.uuid, values={"name": "audit_template1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_audit_template( self.audit_template2.uuid, values={"name": "audit_template2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_audit_template( self.audit_template3.uuid, values={"name": "audit_template3"}) def test_get_audit_template_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) res = self.dbapi.get_audit_template_list( self.context, filters={'deleted': True}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) res = self.dbapi.get_audit_template_list( self.context, filters={'deleted': False}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_eq(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_lt(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_lte(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template2['id'], 
self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_gt(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_gte(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template1['id'], self.audit_template2['id']], [r.id for r in res]) # created_at # def test_get_audit_template_list_filter_created_at_eq(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_lt(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_lte(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_gt(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_gte(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template1['id'], self.audit_template2['id']], [r.id for r in res]) # updated_at # def test_get_audit_template_list_filter_updated_at_eq(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_lt(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_lte(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_gt(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_gte(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template1['id'], self.audit_template2['id']], [r.id for r in res]) class DbAuditTemplateTestCase(base.DbTestCase): def test_get_audit_template_list(self): uuids = [] for i in range(1, 4): audit_template = utils.create_test_audit_template( id=i, uuid=w_utils.generate_uuid(), name='My Audit Template {0}'.format(i)) 
uuids.append(str(audit_template['uuid'])) audit_templates = self.dbapi.get_audit_template_list(self.context) audit_template_uuids = [at.uuid for at in audit_templates] self.assertEqual(sorted(uuids), sorted(audit_template_uuids)) for audit_template in audit_templates: self.assertIsNone(audit_template.goal) self.assertIsNone(audit_template.strategy) def test_get_audit_template_list_eager(self): _goal = utils.get_test_goal() goal = self.dbapi.create_goal(_goal) _strategy = utils.get_test_strategy() strategy = self.dbapi.create_strategy(_strategy) uuids = [] for i in range(1, 4): audit_template = utils.create_test_audit_template( id=i, uuid=w_utils.generate_uuid(), name='My Audit Template {0}'.format(i), goal_id=goal.id, strategy_id=strategy.id) uuids.append(str(audit_template['uuid'])) audit_templates = self.dbapi.get_audit_template_list( self.context, eager=True) audit_template_map = {a.uuid: a for a in audit_templates} self.assertEqual(sorted(uuids), sorted(audit_template_map.keys())) eager_audit_template = audit_template_map[audit_template.uuid] self.assertEqual(goal.as_dict(), eager_audit_template.goal.as_dict()) self.assertEqual( strategy.as_dict(), eager_audit_template.strategy.as_dict()) def test_get_audit_template_list_with_filters(self): goal = utils.create_test_goal(name='DUMMY') audit_template1 = utils.create_test_audit_template( id=1, uuid=w_utils.generate_uuid(), name='My Audit Template 1', description='Description of my audit template 1', goal_id=goal['id']) audit_template2 = utils.create_test_audit_template( id=2, uuid=w_utils.generate_uuid(), name='My Audit Template 2', description='Description of my audit template 2', goal_id=goal['id']) audit_template3 = utils.create_test_audit_template( id=3, uuid=w_utils.generate_uuid(), name='My Audit Template 3', description='Description of my audit template 3', goal_id=goal['id']) self.dbapi.soft_delete_audit_template(audit_template3['uuid']) res = self.dbapi.get_audit_template_list( self.context, filters={'name': 'My Audit Template 1'}) self.assertEqual([audit_template1['id']], [r.id for r in res]) res = self.dbapi.get_audit_template_list( self.context, filters={'name': 'Does not exist'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_audit_template_list( self.context, filters={'goal_name': 'DUMMY'}) self.assertEqual( sorted([audit_template1['id'], audit_template2['id']]), sorted([r.id for r in res])) temp_context = self.context temp_context.show_deleted = True res = self.dbapi.get_audit_template_list( temp_context, filters={'goal_name': 'DUMMY'}) self.assertEqual( sorted([audit_template1['id'], audit_template2['id'], audit_template3['id']]), sorted([r.id for r in res])) res = self.dbapi.get_audit_template_list( self.context, filters={'name': 'My Audit Template 2'}) self.assertEqual([audit_template2['id']], [r.id for r in res]) def test_get_audit_template_list_with_filter_by_uuid(self): audit_template = utils.create_test_audit_template() res = self.dbapi.get_audit_template_list( self.context, filters={'uuid': audit_template["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(audit_template['uuid'], res[0].uuid) def test_get_audit_template_by_id(self): audit_template = utils.create_test_audit_template() audit_template = self.dbapi.get_audit_template_by_id( self.context, audit_template['id']) self.assertEqual(audit_template['uuid'], audit_template.uuid) def test_get_audit_template_by_uuid(self): audit_template = utils.create_test_audit_template() audit_template = self.dbapi.get_audit_template_by_uuid( self.context, 
audit_template['uuid']) self.assertEqual(audit_template['id'], audit_template.id) def test_get_audit_template_that_does_not_exist(self): self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.get_audit_template_by_id, self.context, 1234) def test_update_audit_template(self): audit_template = utils.create_test_audit_template() res = self.dbapi.update_audit_template(audit_template['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_audit_template_that_does_not_exist(self): self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.update_audit_template, 1234, {'name': ''}) def test_update_audit_template_uuid(self): audit_template = utils.create_test_audit_template() self.assertRaises(exception.Invalid, self.dbapi.update_audit_template, audit_template['id'], {'uuid': 'hello'}) def test_destroy_audit_template(self): audit_template = utils.create_test_audit_template() self.dbapi.destroy_audit_template(audit_template['id']) self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.get_audit_template_by_id, self.context, audit_template['id']) def test_destroy_audit_template_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_audit_template(uuid=uuid) self.assertIsNotNone(self.dbapi.get_audit_template_by_uuid( self.context, uuid)) self.dbapi.destroy_audit_template(uuid) self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.get_audit_template_by_uuid, self.context, uuid) def test_destroy_audit_template_that_does_not_exist(self): self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.destroy_audit_template, 1234) def test_create_audit_template_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_audit_template(id=1, uuid=uuid) self.assertRaises(exception.AuditTemplateAlreadyExists, utils.create_test_audit_template, id=2, uuid=uuid) def test_audit_template_create_same_name(self): audit_template1 = utils.create_test_audit_template( uuid=w_utils.generate_uuid(), name='audit_template_name') self.assertEqual(audit_template1['uuid'], audit_template1.uuid) self.assertRaises( exception.AuditTemplateAlreadyExists, utils.create_test_audit_template, uuid=w_utils.generate_uuid(), name='audit_template_name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_efficacy_indicator.py0000664000175000017500000004201400000000000025260 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
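# Editor's note -- an illustrative aside, not upstream code: the list
# tests below check lazy versus eager loading of related rows. Without
# ``eager=True`` the relationship attribute comes back unset (None); with
# it, the related object is fetched alongside. Schematically:
#
#     indicators = self.dbapi.get_efficacy_indicator_list(self.context)
#     indicators[0].action_plan            # None -- not loaded
#
#     indicators = self.dbapi.get_efficacy_indicator_list(
#         self.context, eager=True)
#     indicators[0].action_plan.as_dict()  # populated related object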
"""Tests for manipulating EfficacyIndicator via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbEfficacyIndicatorFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbEfficacyIndicatorFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) self.audit = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None) self.action_plan = utils.create_test_action_plan( audit_id=self.audit.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.efficacy_indicator1 = utils.create_test_efficacy_indicator( action_plan_id=self.action_plan.id, id=1, uuid=None, name="efficacy_indicator1", description="Test Indicator 1") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.efficacy_indicator2 = utils.create_test_efficacy_indicator( action_plan_id=self.action_plan.id, id=2, uuid=None, name="efficacy_indicator2", description="Test Indicator 2") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.efficacy_indicator3 = utils.create_test_efficacy_indicator( action_plan_id=self.action_plan.id, id=3, uuid=None, name="efficacy_indicator3", description="Test Indicator 3") def _soft_delete_efficacy_indicators(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator3.uuid) def _update_efficacy_indicators(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_efficacy_indicator( self.efficacy_indicator1.uuid, values={"description": "New description 1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_efficacy_indicator( self.efficacy_indicator2.uuid, values={"description": "New description 2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_efficacy_indicator( self.efficacy_indicator3.uuid, values={"description": "New description 3"}) def test_get_efficacy_indicator_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator1.uuid) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted': True}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator1.uuid) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted': False}) self.assertEqual([self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_eq(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def 
test_get_efficacy_indicator_filter_deleted_at_lt(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_lte(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_gt(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_gte(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], [r.id for r in res]) # created_at # def test_get_efficacy_indicator_filter_created_at_eq(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_lt(self): with freezegun.freeze_time(self.FAKE_TODAY): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_lte(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_gt(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_gte(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], [r.id for r in res]) # updated_at # def test_get_efficacy_indicator_filter_updated_at_eq(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_lt(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_lte(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_gt(self): 
self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_gte(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], [r.id for r in res]) class DbEfficacyIndicatorTestCase(base.DbTestCase): def test_get_efficacy_indicator_list(self): uuids = [] action_plan = utils.create_test_action_plan() for id_ in range(1, 4): efficacy_indicator = utils.create_test_efficacy_indicator( action_plan_id=action_plan.id, id=id_, uuid=None, name="efficacy_indicator", description="Test Indicator ") uuids.append(str(efficacy_indicator['uuid'])) efficacy_indicators = self.dbapi.get_efficacy_indicator_list( self.context) efficacy_indicator_uuids = [ei.uuid for ei in efficacy_indicators] self.assertEqual(sorted(uuids), sorted(efficacy_indicator_uuids)) for efficacy_indicator in efficacy_indicators: self.assertIsNone(efficacy_indicator.action_plan) def test_get_efficacy_indicator_list_eager(self): _action_plan = utils.get_test_action_plan() action_plan = self.dbapi.create_action_plan(_action_plan) uuids = [] for i in range(1, 4): efficacy_indicator = utils.create_test_efficacy_indicator( id=i, uuid=w_utils.generate_uuid(), action_plan_id=action_plan.id) uuids.append(str(efficacy_indicator['uuid'])) efficacy_indicators = self.dbapi.get_efficacy_indicator_list( self.context, eager=True) efficacy_indicator_map = {a.uuid: a for a in efficacy_indicators} self.assertEqual(sorted(uuids), sorted(efficacy_indicator_map.keys())) eager_efficacy_indicator = efficacy_indicator_map[ efficacy_indicator.uuid] self.assertEqual( action_plan.as_dict(), eager_efficacy_indicator.action_plan.as_dict()) def test_get_efficacy_indicator_list_with_filters(self): audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) action_plan = utils.create_test_action_plan( id=1, uuid=w_utils.generate_uuid(), audit_id=audit.id, first_efficacy_indicator_id=None, state=objects.action_plan.State.RECOMMENDED) efficacy_indicator1 = utils.create_test_efficacy_indicator( id=1, name='indicator_1', uuid=w_utils.generate_uuid(), action_plan_id=action_plan['id'], description='Description efficacy indicator 1', unit='%') efficacy_indicator2 = utils.create_test_efficacy_indicator( id=2, name='indicator_2', uuid=w_utils.generate_uuid(), action_plan_id=2, description='Description efficacy indicator 2', unit='%') efficacy_indicator3 = utils.create_test_efficacy_indicator( id=3, name='indicator_3', uuid=w_utils.generate_uuid(), action_plan_id=action_plan['id'], description='Description efficacy indicator 3', unit='%') efficacy_indicator4 = utils.create_test_efficacy_indicator( id=4, name='indicator_4', uuid=w_utils.generate_uuid(), action_plan_id=action_plan['id'], description='Description efficacy indicator 4', unit='%') self.dbapi.soft_delete_efficacy_indicator(efficacy_indicator4['uuid']) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'name': 'indicator_3'}) self.assertEqual([efficacy_indicator3['id']], [r.id for r in res]) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'unit': 'kWh'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'action_plan_id': 2}) 
self.assertEqual([efficacy_indicator2['id']], [r.id for r in res])

        res = self.dbapi.get_efficacy_indicator_list(
            self.context,
            filters={'action_plan_uuid': action_plan['uuid']})
        self.assertEqual(
            sorted([efficacy_indicator1['id'], efficacy_indicator3['id']]),
            sorted([r.id for r in res]))

    def test_get_efficacy_indicator_list_with_filter_by_uuid(self):
        efficacy_indicator = utils.create_test_efficacy_indicator()
        res = self.dbapi.get_efficacy_indicator_list(
            self.context, filters={'uuid': efficacy_indicator.uuid})
        self.assertEqual(len(res), 1)
        self.assertEqual(efficacy_indicator.uuid, res[0].uuid)

    def test_get_efficacy_indicator_by_id(self):
        # Keep the created and the fetched rows in separate variables so
        # the assertion actually compares two independent reads.
        created = utils.create_test_efficacy_indicator()
        fetched = self.dbapi.get_efficacy_indicator_by_id(
            self.context, created.id)
        self.assertEqual(created.uuid, fetched.uuid)

    def test_get_efficacy_indicator_by_uuid(self):
        created = utils.create_test_efficacy_indicator()
        fetched = self.dbapi.get_efficacy_indicator_by_uuid(
            self.context, created.uuid)
        self.assertEqual(created['id'], fetched.id)

    def test_get_efficacy_indicator_that_does_not_exist(self):
        self.assertRaises(
            exception.EfficacyIndicatorNotFound,
            self.dbapi.get_efficacy_indicator_by_id,
            self.context, 1234)

    def test_update_efficacy_indicator(self):
        efficacy_indicator = utils.create_test_efficacy_indicator()
        res = self.dbapi.update_efficacy_indicator(
            efficacy_indicator.id,
            {'state': objects.action_plan.State.CANCELLED})
        self.assertEqual('CANCELLED', res.state)

    def test_update_efficacy_indicator_that_does_not_exist(self):
        self.assertRaises(
            exception.EfficacyIndicatorNotFound,
            self.dbapi.update_efficacy_indicator,
            1234, {'state': ''})

    def test_update_efficacy_indicator_uuid(self):
        efficacy_indicator = utils.create_test_efficacy_indicator()
        self.assertRaises(
            exception.Invalid,
            self.dbapi.update_efficacy_indicator,
            efficacy_indicator.id, {'uuid': 'hello'})

    def test_destroy_efficacy_indicator(self):
        efficacy_indicator = utils.create_test_efficacy_indicator()
        self.dbapi.destroy_efficacy_indicator(efficacy_indicator['id'])
        self.assertRaises(exception.EfficacyIndicatorNotFound,
                          self.dbapi.get_efficacy_indicator_by_id,
                          self.context, efficacy_indicator['id'])

    def test_destroy_efficacy_indicator_by_uuid(self):
        uuid = w_utils.generate_uuid()
        utils.create_test_efficacy_indicator(uuid=uuid)
        self.assertIsNotNone(self.dbapi.get_efficacy_indicator_by_uuid(
            self.context, uuid))
        self.dbapi.destroy_efficacy_indicator(uuid)
        self.assertRaises(
            exception.EfficacyIndicatorNotFound,
            self.dbapi.get_efficacy_indicator_by_uuid,
            self.context, uuid)

    def test_destroy_efficacy_indicator_that_does_not_exist(self):
        self.assertRaises(exception.EfficacyIndicatorNotFound,
                          self.dbapi.destroy_efficacy_indicator,
                          1234)

    def test_create_efficacy_indicator_already_exists(self):
        uuid = w_utils.generate_uuid()
        utils.create_test_efficacy_indicator(id=1, uuid=uuid)
        self.assertRaises(exception.EfficacyIndicatorAlreadyExists,
                          utils.create_test_efficacy_indicator,
                          id=2, uuid=uuid)
python_watcher-14.0.0/watcher/tests/db/test_goal.py
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Goal via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbGoalFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbGoalFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): with freezegun.freeze_time(self.FAKE_TODAY): self.goal1 = utils.create_test_goal( id=1, uuid=w_utils.generate_uuid(), name="GOAL_1", display_name="Goal 1") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.goal2 = utils.create_test_goal( id=2, uuid=w_utils.generate_uuid(), name="GOAL_2", display_name="Goal 2") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.goal3 = utils.create_test_goal( id=3, uuid=w_utils.generate_uuid(), name="GOAL_3", display_name="Goal 3") def _soft_delete_goals(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_goal(self.goal1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_goal(self.goal2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_goal(self.goal3.id) def _update_goals(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_goal( self.goal1.uuid, values={"display_name": "goal1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_goal( self.goal2.uuid, values={"display_name": "goal2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_goal( self.goal3.uuid, values={"display_name": "goal3"}) def test_get_goal_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_goal(self.goal1.id) res = self.dbapi.get_goal_list( self.context, filters={'deleted': True}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_goal(self.goal1.id) res = self.dbapi.get_goal_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_deleted_at_eq(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_deleted_at_lt(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_deleted_at_lte(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_deleted_at_gt(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, 
filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_deleted_at_gte(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal1.uuid, self.goal2.uuid]), set([r.uuid for r in res])) # created_at # def test_get_goal_list_filter_created_at_eq(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_created_at_lt(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_created_at_lte(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_created_at_gt(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_created_at_gte(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal1.uuid, self.goal2.uuid]), set([r.uuid for r in res])) # updated_at # def test_get_goal_list_filter_updated_at_eq(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_updated_at_lt(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_updated_at_lte(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_updated_at_gt(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_updated_at_gte(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal1.uuid, self.goal2.uuid]), set([r.uuid for r in res])) class DbGoalTestCase(base.DbTestCase): def test_get_goal_list(self): uuids = [] for i in range(1, 4): goal = utils.create_test_goal( id=i, uuid=w_utils.generate_uuid(), name="GOAL_%s" % i, display_name='My Goal %s' % i) uuids.append(str(goal['uuid'])) goals = self.dbapi.get_goal_list(self.context) goal_uuids = [g.uuid for g in goals] self.assertEqual(sorted(uuids), sorted(goal_uuids)) def test_get_goal_list_with_filters(self): goal1 = utils.create_test_goal( id=1, uuid=w_utils.generate_uuid(), name="GOAL_1", display_name='Goal 1', ) goal2 = utils.create_test_goal( id=2, uuid=w_utils.generate_uuid(), name="GOAL_2", display_name='Goal 2', ) goal3 = utils.create_test_goal( id=3, uuid=w_utils.generate_uuid(), name="GOAL_3", display_name='Goal 3', ) self.dbapi.soft_delete_goal(goal3['uuid']) res = self.dbapi.get_goal_list( self.context, 
filters={'display_name': 'Goal 1'}) self.assertEqual([goal1['uuid']], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'display_name': 'Goal 3'}) self.assertEqual([], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'name': 'GOAL_1'}) self.assertEqual([goal1['uuid']], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'display_name': 'Goal 2'}) self.assertEqual([goal2['uuid']], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'uuid': goal3['uuid']}) self.assertEqual([], [r.uuid for r in res]) def test_get_goal_by_uuid(self): efficacy_spec = [{"unit": "%", "name": "dummy", "schema": "Range(min=0, max=100, min_included=True, " "max_included=True, msg=None)", "description": "Dummy indicator"}] created_goal = utils.create_test_goal( efficacy_specification=efficacy_spec) goal = self.dbapi.get_goal_by_uuid(self.context, created_goal['uuid']) self.assertEqual(goal.uuid, created_goal['uuid']) def test_get_goal_that_does_not_exist(self): random_uuid = w_utils.generate_uuid() self.assertRaises(exception.GoalNotFound, self.dbapi.get_goal_by_uuid, self.context, random_uuid) def test_update_goal(self): goal = utils.create_test_goal() res = self.dbapi.update_goal(goal['uuid'], {'display_name': 'updated-model'}) self.assertEqual('updated-model', res.display_name) def test_update_goal_id(self): goal = utils.create_test_goal() self.assertRaises(exception.Invalid, self.dbapi.update_goal, goal['uuid'], {'uuid': 'NEW_GOAL'}) def test_update_goal_that_does_not_exist(self): random_uuid = w_utils.generate_uuid() self.assertRaises(exception.GoalNotFound, self.dbapi.update_goal, random_uuid, {'display_name': ''}) def test_destroy_goal(self): goal = utils.create_test_goal() self.dbapi.destroy_goal(goal['uuid']) self.assertRaises(exception.GoalNotFound, self.dbapi.get_goal_by_uuid, self.context, goal['uuid']) def test_destroy_goal_that_does_not_exist(self): random_uuid = w_utils.generate_uuid() self.assertRaises(exception.GoalNotFound, self.dbapi.destroy_goal, random_uuid) def test_create_goal_already_exists(self): goal_uuid = w_utils.generate_uuid() utils.create_test_goal(uuid=goal_uuid) self.assertRaises(exception.GoalAlreadyExists, utils.create_test_goal, uuid=goal_uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_purge.py0000664000175000017500000005565000000000000022607 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
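# A minimal, self-contained sketch of the freezegun pattern the tests in
# this package rely on: rows are created, updated, or soft-deleted inside
# a freeze_time() block, so the created_at/updated_at/deleted_at values
# written by the DB layer are exactly the frozen timestamps, which makes
# the *_at__eq/__lt/__lte/__gt/__gte filter assertions deterministic.
# The helper below is illustrative only and is not used by the suite.
def _frozen_clock_example():
    """Illustration only: freezegun pins datetime inside the block."""
    import datetime

    import freezegun

    with freezegun.freeze_time('2016-02-24T09:52:05'):
        now = datetime.datetime.utcnow()
    return now  # always datetime.datetime(2016, 2, 24, 9, 52, 5)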
from unittest import mock from oslo_utils import uuidutils import freezegun from watcher.common import context as watcher_context from watcher.common import utils from watcher.db import purge from watcher.db.sqlalchemy import api as dbapi from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class TestPurgeCommand(base.DbTestCase): def setUp(self): super(TestPurgeCommand, self).setUp() self.cmd = purge.PurgeCommand() token_info = { 'token': { 'project': { 'id': 'fake_project' }, 'user': { 'id': 'fake_user' } } } self.context = watcher_context.RequestContext( auth_token_info=token_info, project_id='fake_project', user_id='fake_user', show_deleted=True, ) self.fake_today = '2016-02-24T09:52:05.219414+00:00' self.expired_date = '2016-01-24T09:52:05.219414+00:00' self.m_input = mock.Mock() p = mock.patch("watcher.db.purge.input", self.m_input) self.m_input.return_value = 'y' p.start() self.addCleanup(p.stop) self._id_generator = None self._data_setup() def _generate_id(self): if self._id_generator is None: self._id_generator = self._get_id_generator() return next(self._id_generator) def _get_id_generator(self): seed = 1 while True: yield seed seed += 1 def generate_unique_name(self, prefix): return "%s%s" % (prefix, uuidutils.generate_uuid()) def _data_setup(self): # All the 1's are soft_deleted and are expired # All the 2's are soft_deleted but are not expired # All the 3's are *not* soft_deleted # Number of days we want to keep in DB (no purge for them) self.cmd.age_in_days = 10 self.cmd.max_number = None self.cmd.orphans = True goal1_name = "GOAL_1" goal2_name = "GOAL_2" goal3_name = "GOAL_3" strategy1_name = "strategy_1" strategy2_name = "strategy_2" strategy3_name = "strategy_3" self.audit_template1_name = self.generate_unique_name( prefix="Audit Template 1 ") self.audit_template2_name = self.generate_unique_name( prefix="Audit Template 2 ") self.audit_template3_name = self.generate_unique_name( prefix="Audit Template 3 ") self.audit1_name = self.generate_unique_name( prefix="Audit 1 ") self.audit2_name = self.generate_unique_name( prefix="Audit 2 ") self.audit3_name = self.generate_unique_name( prefix="Audit 3 ") with freezegun.freeze_time(self.expired_date): self.goal1 = obj_utils.create_test_goal( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=goal1_name, display_name=goal1_name.lower()) self.goal2 = obj_utils.create_test_goal( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=goal2_name, display_name=goal2_name.lower()) self.goal3 = obj_utils.create_test_goal( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=goal3_name, display_name=goal3_name.lower()) self.goal1.soft_delete() with freezegun.freeze_time(self.expired_date): self.strategy1 = obj_utils.create_test_strategy( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=strategy1_name, display_name=strategy1_name.lower(), goal_id=self.goal1.id) self.strategy2 = obj_utils.create_test_strategy( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=strategy2_name, display_name=strategy2_name.lower(), goal_id=self.goal2.id) self.strategy3 = obj_utils.create_test_strategy( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=strategy3_name, display_name=strategy3_name.lower(), goal_id=self.goal3.id) self.strategy1.soft_delete() with freezegun.freeze_time(self.expired_date): self.audit_template1 = obj_utils.create_test_audit_template( self.context, name=self.audit_template1_name, 
id=self._generate_id(), uuid=utils.generate_uuid(),
                goal_id=self.goal1.id, strategy_id=self.strategy1.id)
            self.audit_template2 = obj_utils.create_test_audit_template(
                self.context, name=self.audit_template2_name,
                id=self._generate_id(), uuid=utils.generate_uuid(),
                goal_id=self.goal2.id, strategy_id=self.strategy2.id)
            self.audit_template3 = obj_utils.create_test_audit_template(
                self.context, name=self.audit_template3_name,
                id=self._generate_id(), uuid=utils.generate_uuid(),
                goal_id=self.goal3.id, strategy_id=self.strategy3.id)
            self.audit_template1.soft_delete()

        with freezegun.freeze_time(self.expired_date):
            self.audit1 = obj_utils.create_test_audit(
                self.context, id=self._generate_id(),
                uuid=utils.generate_uuid(), name=self.audit1_name,
                goal_id=self.goal1.id, strategy_id=self.strategy1.id)
            self.audit2 = obj_utils.create_test_audit(
                self.context, id=self._generate_id(),
                uuid=utils.generate_uuid(), name=self.audit2_name,
                goal_id=self.goal2.id, strategy_id=self.strategy2.id)
            self.audit3 = obj_utils.create_test_audit(
                self.context, id=self._generate_id(),
                uuid=utils.generate_uuid(), name=self.audit3_name,
                goal_id=self.goal3.id, strategy_id=self.strategy3.id)
            self.audit1.soft_delete()

        with freezegun.freeze_time(self.expired_date):
            self.action_plan1 = obj_utils.create_test_action_plan(
                self.context, audit_id=self.audit1.id,
                id=self._generate_id(), uuid=utils.generate_uuid(),
                strategy_id=self.strategy1.id)
            self.action_plan2 = obj_utils.create_test_action_plan(
                self.context, audit_id=self.audit2.id,
                id=self._generate_id(), strategy_id=self.strategy2.id,
                uuid=utils.generate_uuid())
            self.action_plan3 = obj_utils.create_test_action_plan(
                self.context, audit_id=self.audit3.id,
                id=self._generate_id(), uuid=utils.generate_uuid(),
                strategy_id=self.strategy3.id)

            self.action1 = obj_utils.create_test_action(
                self.context, action_plan_id=self.action_plan1.id,
                id=self._generate_id(), uuid=utils.generate_uuid())
            self.action2 = obj_utils.create_test_action(
                self.context, action_plan_id=self.action_plan2.id,
                id=self._generate_id(), uuid=utils.generate_uuid())
            self.action3 = obj_utils.create_test_action(
                self.context, action_plan_id=self.action_plan3.id,
                id=self._generate_id(), uuid=utils.generate_uuid())
            self.action_plan1.soft_delete()

    @mock.patch.object(dbapi.Connection, "destroy_action")
    @mock.patch.object(dbapi.Connection, "destroy_action_plan")
    @mock.patch.object(dbapi.Connection, "destroy_audit")
    @mock.patch.object(dbapi.Connection, "destroy_audit_template")
    @mock.patch.object(dbapi.Connection, "destroy_strategy")
    @mock.patch.object(dbapi.Connection, "destroy_goal")
    def test_execute_max_number_exceeded(self, m_destroy_goal,
                                         m_destroy_strategy,
                                         m_destroy_audit_template,
                                         m_destroy_audit,
                                         m_destroy_action_plan,
                                         m_destroy_action):
        self.cmd.age_in_days = None
        self.cmd.max_number = 10
        with freezegun.freeze_time(self.fake_today):
            self.goal2.soft_delete()
            self.strategy2.soft_delete()
            self.audit_template2.soft_delete()
            self.audit2.soft_delete()
            self.action_plan2.soft_delete()

        with freezegun.freeze_time(self.fake_today):
            self.cmd.execute()

        # Both the 1's and the 2's are purgeable (age_in_days is None, so
        # age imposes no constraint), but max_number = 10 means only 6
        # objects can be purged without breaking referential integrity.
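        # The arithmetic (illustrative): one generation spans six tables,
        # goal + strategy + audit_template + audit + action_plan + action
        # = 6 rows, and chains are only ever purged whole. Purging the
        # 2's as well would make 6 + 6 = 12 > 10, so exactly one destroy
        # call per table is expected: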
self.assertEqual(m_destroy_goal.call_count, 1) self.assertEqual(m_destroy_strategy.call_count, 1) self.assertEqual(m_destroy_audit_template.call_count, 1) self.assertEqual(m_destroy_audit.call_count, 1) self.assertEqual(m_destroy_action_plan.call_count, 1) self.assertEqual(m_destroy_action.call_count, 1) def test_find_deleted_entries(self): self.cmd.age_in_days = None with freezegun.freeze_time(self.fake_today): objects_map = self.cmd.find_objects_to_delete() self.assertEqual(len(objects_map.goals), 1) self.assertEqual(len(objects_map.strategies), 1) self.assertEqual(len(objects_map.audit_templates), 1) self.assertEqual(len(objects_map.audits), 1) self.assertEqual(len(objects_map.action_plans), 1) self.assertEqual(len(objects_map.actions), 1) def test_find_deleted_and_expired_entries(self): with freezegun.freeze_time(self.fake_today): self.goal2.soft_delete() self.strategy2.soft_delete() self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() with freezegun.freeze_time(self.fake_today): objects_map = self.cmd.find_objects_to_delete() # The 1's are purgeable (due to age of day set to 10) self.assertEqual(len(objects_map.goals), 1) self.assertEqual(len(objects_map.strategies), 1) self.assertEqual(len(objects_map.audit_templates), 1) self.assertEqual(len(objects_map.audits), 1) self.assertEqual(len(objects_map.action_plans), 1) self.assertEqual(len(objects_map.actions), 1) def test_find_deleted_and_nonexpired_related_entries(self): with freezegun.freeze_time(self.fake_today): # orphan audit template audit_template4 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal2.id, name=self.generate_unique_name(prefix="Audit Template 4 "), strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid()) audit4 = obj_utils.create_test_audit( self.context, audit_template_id=audit_template4.id, strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.generate_unique_name(prefix="Audit 4 ")) action_plan4 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit4.id, strategy_id=self.strategy1.id) action4 = obj_utils.create_test_action( self.context, action_plan_id=action_plan4.id, id=self._generate_id(), uuid=utils.generate_uuid()) audit_template5 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal1.id, name=self.generate_unique_name(prefix="Audit Template 5 "), strategy_id=None, id=self._generate_id(), uuid=utils.generate_uuid()) audit5 = obj_utils.create_test_audit( self.context, audit_template_id=audit_template5.id, strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.generate_unique_name(prefix="Audit 5 ")) action_plan5 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit5.id, strategy_id=self.strategy1.id) action5 = obj_utils.create_test_action( self.context, action_plan_id=action_plan5.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.goal2.soft_delete() self.strategy2.soft_delete() self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() # All the 4's should be purged as well because they are orphans # even though they were not deleted # All the 5's should be purged as well even though they are not # expired because their related audit template is itself expired audit_template5.soft_delete() audit5.soft_delete() action_plan5.soft_delete() with 
freezegun.freeze_time(self.fake_today): objects_map = self.cmd.find_objects_to_delete() self.assertEqual(len(objects_map.goals), 1) self.assertEqual(len(objects_map.strategies), 1) self.assertEqual(len(objects_map.audit_templates), 3) self.assertEqual(len(objects_map.audits), 3) self.assertEqual(len(objects_map.action_plans), 3) self.assertEqual(len(objects_map.actions), 3) self.assertEqual( set([self.action1.id, action4.id, action5.id]), set([entry.id for entry in objects_map.actions])) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command(self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): with freezegun.freeze_time(self.fake_today): self.cmd.execute() m_destroy_audit_template.assert_called_once_with( self.audit_template1.uuid) m_destroy_audit.assert_called_with( self.audit1.uuid) m_destroy_action_plan.assert_called_with( self.action_plan1.uuid) m_destroy_action.assert_called_with( self.action1.uuid) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_nonexpired_related_entries( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): with freezegun.freeze_time(self.fake_today): # orphan audit template audit_template4 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal2.id, name=self.generate_unique_name(prefix="Audit Template 4 "), strategy_id=None, id=self._generate_id(), uuid=utils.generate_uuid()) audit4 = obj_utils.create_test_audit( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_template_id=audit_template4.id, name=self.generate_unique_name(prefix="Audit 4 ")) action_plan4 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit4.id, strategy_id=self.strategy1.id) action4 = obj_utils.create_test_action( self.context, action_plan_id=action_plan4.id, id=self._generate_id(), uuid=utils.generate_uuid()) audit_template5 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal1.id, name=self.generate_unique_name(prefix="Audit Template 5 "), strategy_id=None, id=self._generate_id(), uuid=utils.generate_uuid()) audit5 = obj_utils.create_test_audit( self.context, audit_template_id=audit_template5.id, strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.generate_unique_name(prefix="Audit 5 ")) action_plan5 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit5.id, strategy_id=self.strategy1.id) action5 = obj_utils.create_test_action( self.context, action_plan_id=action_plan5.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.goal2.soft_delete() self.strategy2.soft_delete() self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() # All the 4's should be purged as well 
because they are orphans # even though they were not deleted # All the 5's should be purged as well even though they are not # expired because their related audit template is itself expired audit_template5.soft_delete() audit5.soft_delete() action_plan5.soft_delete() with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 1) self.assertEqual(m_destroy_strategy.call_count, 1) self.assertEqual(m_destroy_audit_template.call_count, 3) self.assertEqual(m_destroy_audit.call_count, 3) self.assertEqual(m_destroy_action_plan.call_count, 3) self.assertEqual(m_destroy_action.call_count, 3) m_destroy_audit_template.assert_any_call(self.audit_template1.uuid) m_destroy_audit.assert_any_call(self.audit1.uuid) m_destroy_audit.assert_any_call(audit4.uuid) m_destroy_action_plan.assert_any_call(self.action_plan1.uuid) m_destroy_action_plan.assert_any_call(action_plan4.uuid) m_destroy_action_plan.assert_any_call(action_plan5.uuid) m_destroy_action.assert_any_call(self.action1.uuid) m_destroy_action.assert_any_call(action4.uuid) m_destroy_action.assert_any_call(action5.uuid) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_strategy_uuid( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): self.cmd.exclude_orphans = False self.cmd.uuid = self.strategy1.uuid with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 0) self.assertEqual(m_destroy_strategy.call_count, 1) self.assertEqual(m_destroy_audit_template.call_count, 1) self.assertEqual(m_destroy_audit.call_count, 1) self.assertEqual(m_destroy_action_plan.call_count, 1) self.assertEqual(m_destroy_action.call_count, 1) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_audit_template_not_expired( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): self.cmd.exclude_orphans = True self.cmd.uuid = self.audit_template2.uuid with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 0) self.assertEqual(m_destroy_strategy.call_count, 0) self.assertEqual(m_destroy_audit_template.call_count, 0) self.assertEqual(m_destroy_audit.call_count, 0) self.assertEqual(m_destroy_action_plan.call_count, 0) self.assertEqual(m_destroy_action.call_count, 0) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_audit_template_not_soft_deleted( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, 
m_destroy_action): self.cmd.exclude_orphans = False self.cmd.uuid = self.audit_template3.uuid with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 0) self.assertEqual(m_destroy_strategy.call_count, 0) self.assertEqual(m_destroy_audit_template.call_count, 0) self.assertEqual(m_destroy_audit.call_count, 0) self.assertEqual(m_destroy_action_plan.call_count, 0) self.assertEqual(m_destroy_action.call_count, 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_scoring_engine.py0000664000175000017500000003313400000000000024447 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for manipulating ScoringEngine via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbScoringEngineFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbScoringEngineFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): with freezegun.freeze_time(self.FAKE_TODAY): self.scoring_engine1 = utils.create_test_scoring_engine( id=1, uuid='e8370ede-4f39-11e6-9ffa-08002722cb22', name="se-1", description="Scoring Engine 1", metainfo="a1=b1") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.scoring_engine2 = utils.create_test_scoring_engine( id=2, uuid='e8370ede-4f39-11e6-9ffa-08002722cb23', name="se-2", description="Scoring Engine 2", metainfo="a2=b2") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.scoring_engine3 = utils.create_test_scoring_engine( id=3, uuid='e8370ede-4f39-11e6-9ffa-08002722cb24', name="se-3", description="Scoring Engine 3", metainfo="a3=b3") def _soft_delete_scoring_engines(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_scoring_engine(self.scoring_engine2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_scoring_engine(self.scoring_engine3.id) def _update_scoring_engines(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_scoring_engine( self.scoring_engine1.id, values={"description": "scoring_engine1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_scoring_engine( self.scoring_engine2.id, values={"description": "scoring_engine2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_scoring_engine( self.scoring_engine3.id, values={"description": "scoring_engine3"}) def test_get_scoring_engine_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) res = 
self.dbapi.get_scoring_engine_list( self.context, filters={'deleted': True}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_deleted_at_eq(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_deleted_at_lt(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_deleted_at_lte(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_deleted_at_gt(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_deleted_at_gte(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine1['id'], self.scoring_engine2['id']]), set([r.id for r in res])) # created_at # def test_get_scoring_engine_list_filter_created_at_eq(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_created_at_lt(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_created_at_lte(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_created_at_gt(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_created_at_gte(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine1['id'], self.scoring_engine2['id']]), set([r.id for r in res])) # updated_at # def test_get_scoring_engine_list_filter_updated_at_eq(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def 
test_get_scoring_engine_list_filter_updated_at_lt(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_updated_at_lte(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_updated_at_gt(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_updated_at_gte(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine1['id'], self.scoring_engine2['id']]), set([r.id for r in res])) class DbScoringEngineTestCase(base.DbTestCase): def test_get_scoring_engine_list(self): names = [] for i in range(1, 4): scoring_engine = utils.create_test_scoring_engine( id=i, uuid=w_utils.generate_uuid(), name="SE_ID_%s" % i, description='My ScoringEngine {0}'.format(i), metainfo='a{0}=b{0}'.format(i)) names.append(str(scoring_engine['name'])) scoring_engines = self.dbapi.get_scoring_engine_list(self.context) scoring_engines_names = [se.name for se in scoring_engines] self.assertEqual(sorted(names), sorted(scoring_engines_names)) def test_get_scoring_engine_list_with_filters(self): scoring_engine1 = utils.create_test_scoring_engine( id=1, uuid=w_utils.generate_uuid(), name="SE_ID_1", description='ScoringEngine 1', metainfo="a1=b1", ) scoring_engine2 = utils.create_test_scoring_engine( id=2, uuid=w_utils.generate_uuid(), name="SE_ID_2", description='ScoringEngine 2', metainfo="a2=b2", ) scoring_engine3 = utils.create_test_scoring_engine( id=3, uuid=w_utils.generate_uuid(), name="SE_ID_3", description='ScoringEngine 3', metainfo="a3=b3", ) self.dbapi.soft_delete_scoring_engine(scoring_engine3['uuid']) res = self.dbapi.get_scoring_engine_list( self.context, filters={'description': 'ScoringEngine 1'}) self.assertEqual([scoring_engine1['name']], [r.name for r in res]) res = self.dbapi.get_scoring_engine_list( self.context, filters={'description': 'ScoringEngine 3'}) self.assertEqual([], [r.name for r in res]) res = self.dbapi.get_scoring_engine_list( self.context, filters={'description': 'ScoringEngine 2'}) self.assertEqual([scoring_engine2['name']], [r.name for r in res]) def test_get_scoring_engine_by_id(self): created_scoring_engine = utils.create_test_scoring_engine() scoring_engine = self.dbapi.get_scoring_engine_by_id( self.context, created_scoring_engine['id']) self.assertEqual(scoring_engine.id, created_scoring_engine['id']) def test_get_scoring_engine_by_uuid(self): created_scoring_engine = utils.create_test_scoring_engine() scoring_engine = self.dbapi.get_scoring_engine_by_uuid( self.context, created_scoring_engine['uuid']) self.assertEqual(scoring_engine.uuid, created_scoring_engine['uuid']) def test_get_scoring_engine_by_name(self): created_scoring_engine = utils.create_test_scoring_engine() scoring_engine = self.dbapi.get_scoring_engine_by_name( self.context, created_scoring_engine['name']) self.assertEqual(scoring_engine.name, 
created_scoring_engine['name']) def test_get_scoring_engine_that_does_not_exist(self): self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.get_scoring_engine_by_id, self.context, 404) def test_update_scoring_engine(self): scoring_engine = utils.create_test_scoring_engine() res = self.dbapi.update_scoring_engine( scoring_engine['id'], {'description': 'updated-model'}) self.assertEqual('updated-model', res.description) def test_update_scoring_engine_id(self): scoring_engine = utils.create_test_scoring_engine() self.assertRaises(exception.Invalid, self.dbapi.update_scoring_engine, scoring_engine['id'], {'uuid': w_utils.generate_uuid()}) def test_update_scoring_engine_that_does_not_exist(self): self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.update_scoring_engine, 404, {'description': ''}) def test_destroy_scoring_engine(self): scoring_engine = utils.create_test_scoring_engine() self.dbapi.destroy_scoring_engine(scoring_engine['id']) self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.get_scoring_engine_by_id, self.context, scoring_engine['id']) def test_destroy_scoring_engine_that_does_not_exist(self): self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.destroy_scoring_engine, 404) def test_create_scoring_engine_already_exists(self): scoring_engine_id = "SE_ID" utils.create_test_scoring_engine(name=scoring_engine_id) self.assertRaises(exception.ScoringEngineAlreadyExists, utils.create_test_scoring_engine, name=scoring_engine_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/test_service.py0000664000175000017500000002540500000000000023120 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
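# A note on the two deletion flavours exercised throughout these DB tests
# (inferred from the assertions in this package, not from any separate
# contract): soft_delete_<obj>() stamps deleted_at with the current,
# possibly frozen, time and keeps the row, so it still matches
# {'deleted': True} filters when the request context sets
# show_deleted = True, while destroy_<obj>() removes the row outright,
# after which lookups raise the matching <Obj>NotFound exception.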
"""Tests for manipulating Service via the DB API""" import freezegun from oslo_utils import timeutils from watcher.common import exception from watcher.tests.db import base from watcher.tests.db import utils class TestDbServiceFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbServiceFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): service1_name = "SERVICE_ID_1" service2_name = "SERVICE_ID_2" service3_name = "SERVICE_ID_3" with freezegun.freeze_time(self.FAKE_TODAY): self.service1 = utils.create_test_service( id=1, name=service1_name, host="controller", last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.service2 = utils.create_test_service( id=2, name=service2_name, host="controller", last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.service3 = utils.create_test_service( id=3, name=service3_name, host="controller", last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) def _soft_delete_services(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_service(self.service1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_service(self.service2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_service(self.service3.id) def _update_services(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_service( self.service1.id, values={"host": "controller1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_service( self.service2.id, values={"host": "controller2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_service( self.service3.id, values={"host": "controller3"}) def test_get_service_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_service(self.service1.id) res = self.dbapi.get_service_list( self.context, filters={'deleted': True}) self.assertEqual([self.service1['name']], [r.name for r in res]) def test_get_service_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_service(self.service1.id) res = self.dbapi.get_service_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.service2['name'], self.service3['name']]), set([r.name for r in res])) def test_get_service_list_filter_deleted_at_eq(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_deleted_at_lt(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_deleted_at_lte(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_deleted_at_gt(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.service1['id']], [r.id for 
r in res]) def test_get_service_list_filter_deleted_at_gte(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service1['id'], self.service2['id']]), set([r.id for r in res])) # created_at # def test_get_service_list_filter_created_at_eq(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_created_at_lt(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_created_at_lte(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_created_at_gt(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_created_at_gte(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service1['id'], self.service2['id']]), set([r.id for r in res])) # updated_at # def test_get_service_list_filter_updated_at_eq(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_updated_at_lt(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_updated_at_lte(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_updated_at_gt(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_updated_at_gte(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service1['id'], self.service2['id']]), set([r.id for r in res])) class DbServiceTestCase(base.DbTestCase): def test_get_service_list(self): ids = [] for i in range(1, 4): service = utils.create_test_service( id=i, name="SERVICE_ID_%s" % i, host="controller_{0}".format(i)) ids.append(service['id']) services = self.dbapi.get_service_list(self.context) service_ids = [s.id for s in services] self.assertEqual(sorted(ids), sorted(service_ids)) def test_get_service_list_with_filters(self): service1 = utils.create_test_service( id=1, name="SERVICE_ID_1", host="controller_1", ) service2 = utils.create_test_service( id=2, name="SERVICE_ID_2", host="controller_2", ) service3 = utils.create_test_service( id=3, name="SERVICE_ID_3", host="controller_3", ) self.dbapi.soft_delete_service(service3['id']) res = self.dbapi.get_service_list( self.context, filters={'host': 
'controller_1'})
        self.assertEqual([service1['id']], [r.id for r in res])

        res = self.dbapi.get_service_list(
            self.context, filters={'host': 'controller_3'})
        self.assertEqual([], [r.id for r in res])

        res = self.dbapi.get_service_list(
            self.context, filters={'host': 'controller_2'})
        self.assertEqual([service2['id']], [r.id for r in res])

    def test_get_service_by_name(self):
        created_service = utils.create_test_service()
        service = self.dbapi.get_service_by_name(
            self.context, created_service['name'])
        self.assertEqual(service.name, created_service['name'])

    def test_get_service_that_does_not_exist(self):
        self.assertRaises(exception.ServiceNotFound,
                          self.dbapi.get_service_by_id,
                          self.context, 404)

    def test_update_service(self):
        service = utils.create_test_service()
        res = self.dbapi.update_service(
            service['id'], {'host': 'controller_test'})
        self.assertEqual('controller_test', res.host)

    def test_update_service_that_does_not_exist(self):
        self.assertRaises(exception.ServiceNotFound,
                          self.dbapi.update_service,
                          405, {'name': ''})

    def test_create_service_already_exists(self):
        service_name = "SERVICE_ID"
        utils.create_test_service(name=service_name)
        self.assertRaises(exception.ServiceAlreadyExists,
                          utils.create_test_service,
                          name=service_name)
python_watcher-14.0.0/watcher/tests/db/test_strategy.py
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
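# The "<field>__<op>" filter keys used throughout these tests combine a
# column name with a comparison operator, e.g.
# {'created_at__gte': FAKE_OLD_DATE}. The suffix-to-comparison mapping,
# as the assertions in this package use it (illustrative summary only):
#
#   __eq  -> ==      __lt  -> <      __lte -> <=
#   __gt  -> >       __gte -> >=
#
# A bare key with no suffix means equality, e.g. {'name': 'GOAL_1'}.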
"""Tests for manipulating Strategy via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbStrategyFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbStrategyFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): strategy1_name = "STRATEGY_ID_1" strategy2_name = "STRATEGY_ID_2" strategy3_name = "STRATEGY_ID_3" self.goal1 = utils.create_test_goal( id=1, uuid=w_utils.generate_uuid(), name="GOAL_ID", display_name="Goal") self.goal2 = utils.create_test_goal( id=2, uuid=w_utils.generate_uuid(), name="DUMMY", display_name="Dummy") with freezegun.freeze_time(self.FAKE_TODAY): self.strategy1 = utils.create_test_strategy( id=1, uuid=w_utils.generate_uuid(), name=strategy1_name, display_name="Strategy 1", goal_id=self.goal1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.strategy2 = utils.create_test_strategy( id=2, uuid=w_utils.generate_uuid(), name=strategy2_name, display_name="Strategy 2", goal_id=self.goal1.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.strategy3 = utils.create_test_strategy( id=3, uuid=w_utils.generate_uuid(), name=strategy3_name, display_name="Strategy 3", goal_id=self.goal2.id) def _soft_delete_strategys(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_strategy(self.strategy1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_strategy(self.strategy2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_strategy(self.strategy3.id) def _update_strategies(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_strategy( self.strategy1.id, values={"display_name": "strategy1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_strategy( self.strategy2.id, values={"display_name": "strategy2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_strategy( self.strategy3.id, values={"display_name": "strategy3"}) def test_get_strategy_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_strategy(self.strategy1.id) res = self.dbapi.get_strategy_list( self.context, filters={'deleted': True}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_strategy(self.strategy1.id) res = self.dbapi.get_strategy_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_deleted_at_eq(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_deleted_at_lt(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_deleted_at_lte(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) 
self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_deleted_at_gt(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_deleted_at_gte(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy1['uuid'], self.strategy2['uuid']]), set([r.uuid for r in res])) # created_at # def test_get_strategy_list_filter_created_at_eq(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_created_at_lt(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_created_at_lte(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_created_at_gt(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_created_at_gte(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy1['uuid'], self.strategy2['uuid']]), set([r.uuid for r in res])) # updated_at # def test_get_strategy_list_filter_updated_at_eq(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_updated_at_lt(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_updated_at_lte(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_updated_at_gt(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_updated_at_gte(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy1['uuid'], self.strategy2['uuid']]), set([r.uuid for r in res])) class DbStrategyTestCase(base.DbTestCase): def test_get_strategy_list(self): uuids = [] for i in range(1, 4): strategy = utils.create_test_strategy( id=i, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_%s" % i, display_name='My Strategy {0}'.format(i)) uuids.append(str(strategy['uuid'])) strategies = self.dbapi.get_strategy_list(self.context) strategy_uuids = 
[s.uuid for s in strategies] self.assertEqual(sorted(uuids), sorted(strategy_uuids)) for strategy in strategies: self.assertIsNone(strategy.goal) def test_get_strategy_list_eager(self): _goal = utils.get_test_goal() goal = self.dbapi.create_goal(_goal) uuids = [] for i in range(1, 4): strategy = utils.create_test_strategy( id=i, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_%s" % i, display_name='My Strategy {0}'.format(i), goal_id=goal.id) uuids.append(str(strategy['uuid'])) strategies = self.dbapi.get_strategy_list(self.context, eager=True) strategy_map = {a.uuid: a for a in strategies} self.assertEqual(sorted(uuids), sorted(strategy_map.keys())) eager_strategy = strategy_map[strategy.uuid] self.assertEqual(goal.as_dict(), eager_strategy.goal.as_dict()) def test_get_strategy_list_with_filters(self): # NOTE(erakli): we don't create a goal in the database but link to # goal_id = 1, and dbapi.create_strategy() raises no error. # Is this the right behaviour? strategy1 = utils.create_test_strategy( id=1, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_1", display_name='Strategy 1', ) strategy2 = utils.create_test_strategy( id=2, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_2", display_name='Strategy 2', ) strategy3 = utils.create_test_strategy( id=3, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_3", display_name='Strategy 3', ) self.dbapi.soft_delete_strategy(strategy3['uuid']) res = self.dbapi.get_strategy_list( self.context, filters={'display_name': 'Strategy 1'}) self.assertEqual([strategy1['uuid']], [r.uuid for r in res]) res = self.dbapi.get_strategy_list( self.context, filters={'display_name': 'Strategy 3'}) self.assertEqual([], [r.uuid for r in res]) res = self.dbapi.get_strategy_list( self.context, filters={'goal_id': 1}) self.assertEqual([strategy1['uuid'], strategy2['uuid']], [r.uuid for r in res]) res = self.dbapi.get_strategy_list( self.context, filters={'display_name': 'Strategy 2'}) self.assertEqual([strategy2['uuid']], [r.uuid for r in res]) def test_get_strategy_by_uuid(self): created_strategy = utils.create_test_strategy() strategy = self.dbapi.get_strategy_by_uuid( self.context, created_strategy['uuid']) self.assertEqual(strategy.uuid, created_strategy['uuid']) def test_get_strategy_by_name(self): created_strategy = utils.create_test_strategy() strategy = self.dbapi.get_strategy_by_name( self.context, created_strategy['name']) self.assertEqual(strategy.name, created_strategy['name']) def test_get_strategy_that_does_not_exist(self): self.assertRaises(exception.StrategyNotFound, self.dbapi.get_strategy_by_id, self.context, 404) def test_update_strategy(self): strategy = utils.create_test_strategy() res = self.dbapi.update_strategy( strategy['uuid'], {'display_name': 'updated-model'}) self.assertEqual('updated-model', res.display_name) def test_update_goal_id(self): strategy = utils.create_test_strategy() self.assertRaises(exception.Invalid, self.dbapi.update_strategy, strategy['uuid'], {'uuid': 'new_strategy_id'}) def test_update_strategy_that_does_not_exist(self): self.assertRaises(exception.StrategyNotFound, self.dbapi.update_strategy, 404, {'display_name': ''}) def test_destroy_strategy(self): strategy = utils.create_test_strategy() self.dbapi.destroy_strategy(strategy['uuid']) self.assertRaises(exception.StrategyNotFound, self.dbapi.get_strategy_by_id, self.context, strategy['uuid']) def test_destroy_strategy_that_does_not_exist(self): self.assertRaises(exception.StrategyNotFound, self.dbapi.destroy_strategy, 404) def test_create_strategy_already_exists(self):
strategy_id = "STRATEGY_ID" utils.create_test_strategy(name=strategy_id) self.assertRaises(exception.StrategyAlreadyExists, utils.create_test_strategy, name=strategy_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/db/utils.py0000664000175000017500000003235200000000000021560 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher test utilities.""" from oslo_utils import timeutils from watcher.db import api as db_api from watcher.db.sqlalchemy import models from watcher import objects def id_generator(): id_ = 1 while True: yield id_ id_ += 1 def _load_relationships(model, db_data): rel_data = {} relationships = db_api.get_instance()._get_relationships(model) for name, relationship in relationships.items(): related_model = relationship.argument if not db_data.get(name): rel_data[name] = None else: rel_data[name] = related_model(**db_data.get(name)) return rel_data def get_test_audit_template(**kwargs): audit_template_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), 'goal_id': kwargs.get('goal_id', 1), 'strategy_id': kwargs.get('strategy_id', None), 'name': kwargs.get('name', 'My Audit Template'), 'description': kwargs.get('description', 'Desc. Of My Audit Template'), 'scope': kwargs.get('scope', []), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. audit_template_data.update( _load_relationships(models.AuditTemplate, kwargs)) return audit_template_data def create_test_audit_template(**kwargs): """Create test audit template entry in DB and return AuditTemplate DB object. Function to be used to create test AuditTemplate objects in the database. :param kwargs: kwargs with overriding values for audit template's attributes. :returns: Test AuditTemplate DB object.
""" # noqa: E501 audit_template = get_test_audit_template(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del audit_template['id'] dbapi = db_api.get_instance() return dbapi.create_audit_template(audit_template) def get_test_audit(**kwargs): audit_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), 'name': kwargs.get('name', 'My Audit'), 'audit_type': kwargs.get('audit_type', 'ONESHOT'), 'state': kwargs.get('state', objects.audit.State.PENDING), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'parameters': kwargs.get('parameters', {}), 'interval': kwargs.get('interval', '3600'), 'goal_id': kwargs.get('goal_id', 1), 'strategy_id': kwargs.get('strategy_id', None), 'scope': kwargs.get('scope', []), 'auto_trigger': kwargs.get('auto_trigger', False), 'next_run_time': kwargs.get('next_run_time'), 'hostname': kwargs.get('hostname', 'host_1'), 'start_time': kwargs.get('start_time'), 'end_time': kwargs.get('end_time'), 'force': kwargs.get('force', False) } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. audit_data.update(_load_relationships(models.Audit, kwargs)) return audit_data def create_test_audit(**kwargs): """Create test audit entry in DB and return Audit DB object. Function to be used to create test Audit objects in the database. :param kwargs: kwargs with overriding values for audit's attributes. :returns: Test Audit DB object. """ audit = get_test_audit(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del audit['id'] dbapi = db_api.get_instance() return dbapi.create_audit(audit) def get_test_action(**kwargs): action_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), 'action_plan_id': kwargs.get('action_plan_id', 1), 'action_type': kwargs.get('action_type', 'nop'), 'input_parameters': kwargs.get('input_parameters', {'key1': 'val1', 'key2': 'val2', 'resource_id': '10a47dd1-4874-4298-91cf-eff046dbdb8d'}), 'state': kwargs.get('state', objects.action_plan.State.PENDING), 'parents': kwargs.get('parents', []), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. action_data.update(_load_relationships(models.Action, kwargs)) return action_data def create_test_action(**kwargs): """Create test action entry in DB and return Action DB object. Function to be used to create test Action objects in the database. :param kwargs: kwargs with overriding values for action's attributes. :returns: Test Action DB object.
""" action = get_test_action(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del action['id'] dbapi = db_api.get_instance() return dbapi.create_action(action) def get_test_action_plan(**kwargs): action_plan_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '76be87bd-3422-43f9-93a0-e85a577e3061'), 'state': kwargs.get('state', objects.action_plan.State.ONGOING), 'audit_id': kwargs.get('audit_id', 1), 'strategy_id': kwargs.get('strategy_id', 1), 'global_efficacy': kwargs.get('global_efficacy', []), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'hostname': kwargs.get('hostname', 'host_1'), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. action_plan_data.update(_load_relationships(models.ActionPlan, kwargs)) return action_plan_data def create_test_action_plan(**kwargs): """Create test action plan entry in DB and return Action Plan DB object. Function to be used to create test ActionPlan objects in the database. :param kwargs: kwargs with overriding values for action plan's attributes. :returns: Test ActionPlan DB object. """ action = get_test_action_plan(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del action['id'] dbapi = db_api.get_instance() return dbapi.create_action_plan(action) def get_test_goal(**kwargs): return { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'f7ad87ae-4298-91cf-93a0-f35a852e3652'), 'name': kwargs.get('name', 'TEST'), 'display_name': kwargs.get('display_name', 'test goal'), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'efficacy_specification': kwargs.get('efficacy_specification', []), } def create_test_goal(**kwargs): """Create test goal entry in DB and return Goal DB object. Function to be used to create test Goal objects in the database. :param kwargs: kwargs with overriding values for goal's attributes. :returns: Test Goal DB object. """ goal = get_test_goal(**kwargs) dbapi = db_api.get_instance() return dbapi.create_goal(goal) def get_test_scoring_engine(**kwargs): return { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'e8370ede-4f39-11e6-9ffa-08002722cb21'), 'name': kwargs.get('name', 'test-se-01'), 'description': kwargs.get('description', 'test scoring engine 01'), 'metainfo': kwargs.get('metainfo', 'test_attr=test_val'), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_scoring_engine(**kwargs): """Create test scoring engine in DB and return ScoringEngine DB object. Function to be used to create test ScoringEngine objects in the database. :param kwargs: kwargs with overriding values for SE's attributes. :returns: Test ScoringEngine DB object.
""" scoring_engine = get_test_scoring_engine(**kwargs) dbapi = db_api.get_instance() return dbapi.create_scoring_engine(scoring_engine) def get_test_strategy(**kwargs): strategy_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'cb3d0b58-4415-4d90-b75b-1e96878730e3'), 'name': kwargs.get('name', 'TEST'), 'display_name': kwargs.get('display_name', 'test strategy'), 'goal_id': kwargs.get('goal_id', 1), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'parameters_spec': kwargs.get('parameters_spec', {}), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. strategy_data.update(_load_relationships(models.Strategy, kwargs)) return strategy_data def get_test_service(**kwargs): return { 'id': kwargs.get('id', 1), 'name': kwargs.get('name', 'watcher-service'), 'host': kwargs.get('host', 'controller'), 'last_seen_up': kwargs.get( 'last_seen_up', timeutils.parse_isotime('2016-09-22T08:32:06').replace(tzinfo=None) ), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_service(**kwargs): """Create test service entry in DB and return Service DB object. Function to be used to create test Service objects in the database. :param kwargs: kwargs with overriding values for service's attributes. :returns: Test Service DB object. """ service = get_test_service(**kwargs) dbapi = db_api.get_instance() return dbapi.create_service(service) def create_test_strategy(**kwargs): """Create test strategy entry in DB and return Strategy DB object. Function to be used to create test Strategy objects in the database. :param kwargs: kwargs with overriding values for strategy's attributes. :returns: Test Strategy DB object. """ strategy = get_test_strategy(**kwargs) dbapi = db_api.get_instance() return dbapi.create_strategy(strategy) def get_test_efficacy_indicator(**kwargs): return { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '202cfcf9-811c-411a-8a35-d8351f64eb24'), 'name': kwargs.get('name', 'test_indicator'), 'description': kwargs.get('description', 'Test indicator'), 'unit': kwargs.get('unit', '%'), 'value': kwargs.get('value', 0), 'action_plan_id': kwargs.get('action_plan_id', 1), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_efficacy_indicator(**kwargs): """Create and return a test efficacy indicator entry in DB. Function to be used to create test EfficacyIndicator objects in the DB. :param kwargs: kwargs for overriding the values of the attributes :returns: Test EfficacyIndicator DB object. """ efficacy_indicator = get_test_efficacy_indicator(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del efficacy_indicator['id'] dbapi = db_api.get_instance() return dbapi.create_efficacy_indicator(efficacy_indicator) def get_test_action_desc(**kwargs): return { 'id': kwargs.get('id', 1), 'action_type': kwargs.get('action_type', 'nop'), 'description': kwargs.get('description', 'Logging a NOP message'), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_action_desc(**kwargs): """Create test action description entry in DB and return ActionDescription. Function to be used to create test ActionDescription objects in the DB. 
:param kwargs: kwargs with overriding values for service's attributes. :returns: Test ActionDescription DB object. """ action_desc = get_test_action_desc(**kwargs) dbapi = db_api.get_instance() return dbapi.create_action_description(action_desc) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/0000775000175000017500000000000000000000000022576 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/__init__.py0000664000175000017500000000010100000000000024677 0ustar00zuulzuul00000000000000__author__ = 'Jean-Emile DARTOIS ' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/audit/0000775000175000017500000000000000000000000023704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/audit/__init__.py0000664000175000017500000000000000000000000026003 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/audit/test_audit_handlers.py0000664000175000017500000005740300000000000030314 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
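# Tests for the decision engine audit handlers: the one-shot handler, the
# auto-triggering of generated action plans, and the continuous
# (interval/cron driven) handler.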
import datetime from unittest import mock from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from apscheduler import job from watcher.applier import rpcapi from watcher.common import exception from watcher.common import scheduling from watcher.db.sqlalchemy import api as sq_api from watcher.decision_engine.audit import continuous from watcher.decision_engine.audit import oneshot from watcher.decision_engine.model.collector import manager from watcher.decision_engine.strategy.strategies import base as base_strategy from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.decision_engine.model import faker_cluster_state as faker from watcher.tests.objects import utils as obj_utils class TestOneShotAuditHandler(base.DbTestCase): def setUp(self): super(TestOneShotAuditHandler, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.strategy.id) self.audit = obj_utils.create_test_audit( self.context, uuid=uuidutils.generate_uuid(), goal_id=self.goal.id, strategy_id=self.strategy.id, audit_template_id=audit_template.id, goal=self.goal) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_trigger_audit_without_errors(self, m_collector): m_collector.return_value = faker.FakerModelCollector() audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(base_strategy.BaseStrategy, "do_execute") @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") def test_trigger_audit_with_error(self, m_collector, m_do_execute): m_collector.return_value = faker.FakerModelCollector() m_do_execute.side_effect = Exception audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, priority=objects.fields.NotificationPriority.ERROR, phase=objects.fields.NotificationPhase.ERROR)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) 
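# The two tests above pin down the notification lifecycle of a one-shot
# audit: on success the handler emits STRATEGY START/END followed by
# PLANNER START/END, while a strategy failure ends with an ERROR-priority
# STRATEGY ERROR phase and no PLANNER notifications. As a minimal usage
# sketch (not part of the suite; ``audit`` and ``ctx`` stand in for
# objects like those created in setUp()):
#
#     handler = oneshot.OneShotAuditHandler()
#     handler.execute(audit, ctx)  # runs the strategy, then the planner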
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_trigger_audit_state_succeeded(self, m_collector): m_collector.return_value = faker.FakerModelCollector() audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) audit = objects.audit.Audit.get_by_uuid(self.context, self.audit.uuid) self.assertEqual(objects.audit.State.SUCCEEDED, audit.state) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_trigger_audit_send_notification(self, m_collector): m_collector.return_value = faker.FakerModelCollector() audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) class TestAutoTriggerActionPlan(base.DbTestCase): def setUp(self): super(TestAutoTriggerActionPlan, self).setUp() self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context) self.audit = obj_utils.create_test_audit( self.context, id=0, uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.CONTINUOUS.value, goal=self.goal, auto_trigger=True) self.ongoing_action_plan = obj_utils.create_test_action_plan( self.context, uuid=uuidutils.generate_uuid(), audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy, ) self.recommended_action_plan = obj_utils.create_test_action_plan( self.context, uuid=uuidutils.generate_uuid(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy, ) @mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute') @mock.patch.object(objects.action_plan.ActionPlan, 'list') def test_trigger_audit_with_actionplan_ongoing(self, mock_list, mock_do_execute): mock_list.return_value = [self.ongoing_action_plan] 
audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) self.assertFalse(mock_do_execute.called) @mock.patch.object(rpcapi.ApplierAPI, 'launch_action_plan') @mock.patch.object(objects.action_plan.ActionPlan, 'list') @mock.patch.object(objects.audit.Audit, 'get_by_id') def test_trigger_action_plan_without_ongoing(self, mock_get_by_id, mock_list, mock_applier): mock_get_by_id.return_value = self.audit mock_list.return_value = [] auto_trigger_handler = oneshot.OneShotAuditHandler() with mock.patch.object(auto_trigger_handler, 'do_schedule') as m_schedule: m_schedule().uuid = self.recommended_action_plan.uuid auto_trigger_handler.post_execute(self.audit, mock.MagicMock(), self.context) mock_applier.assert_called_once_with(self.context, self.recommended_action_plan.uuid) @mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute') def test_trigger_audit_with_force(self, mock_do_execute): audit_handler = oneshot.OneShotAuditHandler() self.audit.force = True audit_handler.execute(self.audit, self.context) self.assertTrue(mock_do_execute.called) class TestContinuousAuditHandler(base.DbTestCase): def setUp(self): super(TestContinuousAuditHandler, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) audit_template = obj_utils.create_test_audit_template( self.context) self.audits = [ obj_utils.create_test_audit( self.context, id=id_, name='My Audit {0}'.format(id_), uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.CONTINUOUS.value, goal=self.goal, hostname='hostname1') for id_ in range(2, 4)] cfg.CONF.set_override("host", "hostname1") @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_interval( self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].next_run_time = (datetime.datetime.now() - datetime.timedelta(seconds=1800)) mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() audit_handler.launch_audits_periodically() m_service.assert_called() m_engine.assert_called() m_add_job.assert_called() mock_jobs.assert_called() self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNone(self.audits[1].next_run_time) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_cron( self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].interval = "*/5 * * * *" mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() 
audit_handler.launch_audits_periodically() m_service.assert_called() m_engine.assert_called() m_add_job.assert_called() mock_jobs.assert_called() self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNone(self.audits[1].next_run_time) @mock.patch.object(continuous.ContinuousAuditHandler, '_next_cron_time') @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_invalid_cron( self, mock_list, mock_jobs, m_add_job, m_engine, m_service, mock_cron): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].interval = "*/5* * * *" mock_cron.side_effect = exception.CronFormatIsInvalid mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() self.assertRaises(exception.CronFormatIsInvalid, audit_handler.launch_audits_periodically) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_multiply_audits_periodically(self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_service.return_value = mock.MagicMock() calls = [mock.call(audit_handler.execute_audit, 'interval', args=[mock.ANY, mock.ANY], seconds=3600, name='execute_audit', next_run_time=mock.ANY) for _ in self.audits] audit_handler.launch_audits_periodically() m_add_job.assert_has_calls(calls) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_period_audit_not_called_when_deleted(self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits mock_jobs.return_value = mock.MagicMock() m_service.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit', func=audit_handler.execute_audit, args=(self.audits[0], mock.MagicMock()), kwargs={}), job.Job(mock.MagicMock(), name='execute_audit', func=audit_handler.execute_audit, args=(self.audits[1], mock.MagicMock()), kwargs={}) ] mock_jobs.return_value = ap_jobs audit_handler.launch_audits_periodically() audit_handler.update_audit_state(self.audits[1], objects.audit.State.CANCELLED) audit_handler.update_audit_state(self.audits[0], objects.audit.State.SUSPENDED) is_inactive = audit_handler._is_audit_inactive(self.audits[1]) self.assertTrue(is_inactive) is_inactive = audit_handler._is_audit_inactive(self.audits[0]) self.assertTrue(is_inactive) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') 
@mock.patch.object(objects.audit.AuditStateTransitionManager, 'is_inactive') @mock.patch.object(continuous.ContinuousAuditHandler, 'execute') def test_execute_audit_with_interval_no_job( self, m_execute, m_is_inactive, m_get_jobs, m_get_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() self.audits[0].next_run_time = (datetime.datetime.now() - datetime.timedelta(seconds=1800)) m_is_inactive.return_value = True m_get_jobs.return_value = [] audit_handler.execute_audit(self.audits[0], self.context) self.assertIsNotNone(self.audits[0].next_run_time) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api.enginefacade.writer, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'remove_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_diff_interval( self, mock_list, mock_jobs, m_add_job, m_remove_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].next_run_time = (datetime.datetime.now() - datetime.timedelta(seconds=1800)) m_job1 = mock.MagicMock() m_job1.name = 'execute_audit' m_audit = mock.MagicMock() m_audit.uuid = self.audits[0].uuid m_audit.interval = 60 m_job1.args = [m_audit] mock_jobs.return_value = [m_job1] m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() audit_handler.launch_audits_periodically() m_service.assert_called() m_engine.assert_called() m_add_job.assert_called() mock_jobs.assert_called() self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNone(self.audits[1].next_run_time) audit_handler.launch_audits_periodically() m_remove_job.assert_called() @mock.patch.object(continuous.ContinuousAuditHandler, 'get_planner', mock.Mock()) @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_execute_audit(self): audit_handler = continuous.ContinuousAuditHandler() audit_handler.execute_audit(self.audits[0], self.context) expected_calls = [ mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') def test_is_audit_inactive(self, mock_jobs): audit_handler = continuous.ContinuousAuditHandler() mock_jobs.return_value = mock.MagicMock() audit_handler._audit_scheduler = mock.MagicMock() ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit', func=audit_handler.execute_audit, args=(self.audits[0], mock.MagicMock()), kwargs={}), ] audit_handler.update_audit_state(self.audits[1], objects.audit.State.CANCELLED) mock_jobs.return_value = ap_jobs is_inactive = audit_handler._is_audit_inactive(self.audits[1]) self.assertTrue(is_inactive) is_inactive = audit_handler._is_audit_inactive(self.audits[0]) self.assertFalse(is_inactive) def 
test_check_audit_expired(self): current = timeutils.utcnow() # start_time and end_time are None audit_handler = continuous.ContinuousAuditHandler() result = audit_handler.check_audit_expired(self.audits[0]) self.assertFalse(result) self.assertIsNone(self.audits[0].start_time) self.assertIsNone(self.audits[0].end_time) # current time < start_time and end_time is None self.audits[0].start_time = current+datetime.timedelta(days=1) result = audit_handler.check_audit_expired(self.audits[0]) self.assertTrue(result) self.assertIsNone(self.audits[0].end_time) # current time is between start_time and end_time self.audits[0].start_time = current-datetime.timedelta(days=1) self.audits[0].end_time = current+datetime.timedelta(days=1) result = audit_handler.check_audit_expired(self.audits[0]) self.assertFalse(result) # current time > end_time self.audits[0].end_time = current-datetime.timedelta(days=1) result = audit_handler.check_audit_expired(self.audits[0]) self.assertTrue(result) self.assertEqual(objects.audit.State.SUCCEEDED, self.audits[0].state) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/cluster/0000775000175000017500000000000000000000000024257 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/cluster/__init__.py0000664000175000017500000000000000000000000026356 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/cluster/test_cinder_cdmc.py0000664000175000017500000001224000000000000030121 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
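# Tests for the Cinder (storage) cluster data model collector. As a rough
# sketch of the flow exercised below (``conf`` and ``osc`` stand in for the
# mocked config and OpenStack client objects used in the tests):
#
#     cdmc = cinder.CinderClusterDataModelCollector(config=conf, osc=osc)
#     cdmc.get_audit_scope_handler([])
#     model = cdmc.execute()
#     nodes = model.get_all_storage_nodes()  # dict of storage nodes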
from unittest import mock from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.model.collector import cinder from watcher.tests import base from watcher.tests import conf_fixture class TestCinderClusterDataModelCollector(base.TestCase): def setUp(self): super(TestCinderClusterDataModelCollector, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_cdmc_execute(self, m_cinder_helper_cls): m_cinder_helper = mock.Mock(name="cinder_helper") m_cinder_helper_cls.return_value = m_cinder_helper fake_storage_node = mock.Mock( host='host@backend', zone='zone', status='enabled', state='up', volume_type=['fake_type'] ) fake_storage_pool = mock.Mock( total_volumes=1, total_capacity_gb=30, free_capacity_gb=20, provisioned_capacity_gb=10, allocated_capacity_gb=10, virtual_free=20 ) setattr(fake_storage_pool, 'name', 'host@backend#pool') fake_volume = mock.Mock( id=1, size=1, status='in-use', attachments=[{"server_id": "server_id", "attachment_id": "attachment_id"}], multiattach='false', snapshot_id='', metadata='{"key": "value"}', bootable='false' ) setattr(fake_volume, 'name', 'name') setattr(fake_volume, 'os-vol-tenant-attr:tenant_id', '0c003652-0cb1-4210-9005-fd5b92b1faa2') setattr(fake_volume, 'os-vol-host-attr:host', 'host@backend#pool') # storage node list m_cinder_helper.get_storage_node_list.return_value = [ fake_storage_node] m_cinder_helper.get_volume_type_by_backendname.return_value = [ 'fake_type'] # storage pool list m_cinder_helper.get_storage_pool_list.return_value = [ fake_storage_pool] # volume list m_cinder_helper.get_volume_list.return_value = [fake_volume] m_config = mock.Mock() m_osc = mock.Mock() cinder_cdmc = cinder.CinderClusterDataModelCollector( config=m_config, osc=m_osc) cinder_cdmc.get_audit_scope_handler([]) model = cinder_cdmc.execute() storage_nodes = model.get_all_storage_nodes() storage_node = list(storage_nodes.values())[0] storage_pools = model.get_node_pools(storage_node) storage_pool = storage_pools[0] volumes = model.get_pool_volumes(storage_pool) volume = volumes[0] self.assertEqual(1, len(storage_nodes)) self.assertEqual(1, len(storage_pools)) self.assertEqual(1, len(volumes)) self.assertEqual(storage_node.host, 'host@backend') self.assertEqual(storage_pool.name, 'host@backend#pool') self.assertEqual(volume.uuid, '1') @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_cdmc_total_capacity_gb_not_integer( self, m_cinder_helper_cls): m_cinder_helper = mock.Mock(name="cinder_helper") m_cinder_helper_cls.return_value = m_cinder_helper fake_storage_node = mock.Mock( host='host@backend', zone='zone', status='enabled', state='up', volume_type=['fake_type'] ) fake_storage_pool = mock.Mock( total_volumes=1, total_capacity_gb="unknown", free_capacity_gb=20, provisioned_capacity_gb=10, allocated_capacity_gb=10, virtual_free=20 ) setattr(fake_storage_pool, 'name', 'host@backend#pool') # storage node list m_cinder_helper.get_storage_node_list.return_value = [ fake_storage_node] m_cinder_helper.get_volume_type_by_backendname.return_value = [ 'fake_type'] # storage pool list m_cinder_helper.get_storage_pool_list.return_value = [ fake_storage_pool] # volume list m_cinder_helper.get_volume_list.return_value = [] m_config = mock.Mock() m_osc = mock.Mock() cinder_cdmc = cinder.CinderClusterDataModelCollector( 
config=m_config, osc=m_osc) cinder_cdmc.get_audit_scope_handler([]) self.assertRaises(exception.InvalidPoolAttributeValue, cinder_cdmc.execute) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py0000664000175000017500000000573200000000000033577 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.decision_engine.model.collector import base from watcher.decision_engine.model.collector import cinder from watcher.decision_engine.model.collector import ironic from watcher.decision_engine.model.collector import nova from watcher.decision_engine.model import model_root from watcher.tests import base as test_base class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): @property def notification_endpoints(self): return [] def get_audit_scope_handler(self, audit_scope): return None def execute(self): model = model_root.ModelRoot() # Do something here... return model class TestClusterDataModelCollector(test_base.TestCase): def test_is_singleton(self): m_config = mock.Mock() inst1 = DummyClusterDataModelCollector(config=m_config) inst2 = DummyClusterDataModelCollector(config=m_config) self.assertIs(inst1, inst2) def test_in_memory_model_is_copied(self): m_config = mock.Mock() collector = DummyClusterDataModelCollector(config=m_config) collector.synchronize() self.assertIs( collector._cluster_data_model, collector.cluster_data_model) self.assertIsNot( collector.cluster_data_model, collector.get_latest_cluster_data_model()) class TestComputeDataModelCollector(test_base.TestCase): def test_model_scope_is_none(self): m_config = mock.Mock() collector = nova.NovaClusterDataModelCollector(config=m_config) collector._audit_scope_handler = mock.Mock() collector._data_model_scope = None self.assertIsNone(collector.execute()) class TestStorageDataModelCollector(test_base.TestCase): def test_model_scope_is_none(self): m_config = mock.Mock() collector = cinder.CinderClusterDataModelCollector(config=m_config) collector._audit_scope_handler = mock.Mock() collector._data_model_scope = None self.assertIsNone(collector.execute()) class TestBareMetalDataModelCollector(test_base.TestCase): def test_model_scope_is_none(self): m_config = mock.Mock() collector = ironic.BaremetalClusterDataModelCollector(config=m_config) collector._audit_scope_handler = mock.Mock() collector._data_model_scope = None self.assertIsNone(collector.execute()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/cluster/test_nova_cdmc.py0000664000175000017500000004617500000000000027636 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os_resource_classes as orc from unittest import mock from watcher.common import nova_helper from watcher.common import placement_helper from watcher.decision_engine.model.collector import nova from watcher.tests import base from watcher.tests import conf_fixture class TestNovaClusterDataModelCollector(base.TestCase): def setUp(self): super(TestNovaClusterDataModelCollector, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_nova_cdmc_execute(self, m_nova_helper_cls, m_placement_helper_cls): m_placement_helper = mock.Mock(name="placement_helper") m_placement_helper.get_inventories.return_value = { orc.VCPU: { "allocation_ratio": 16.0, "total": 8, "reserved": 0, "step_size": 1, "min_unit": 1, "max_unit": 8}, orc.MEMORY_MB: { "allocation_ratio": 1.5, "total": 16039, "reserved": 512, "step_size": 1, "min_unit": 1, "max_unit": 16039}, orc.DISK_GB: { "allocation_ratio": 1.0, "total": 142, "reserved": 0, "step_size": 1, "min_unit": 1, "max_unit": 142} } m_placement_helper.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper_cls.return_value = m_placement_helper m_nova_helper = mock.Mock(name="nova_helper") m_nova_helper_cls.return_value = m_nova_helper m_nova_helper.get_service.return_value = mock.Mock( id=1355, host='test_hostname', binary='nova-compute', status='enabled', state='up', disabled_reason='', ) minimal_node = dict( id='160a0e7b-8b0b-4854-8257-9c71dff4efcc', hypervisor_hostname='test_hostname', state='TEST_STATE', status='TEST_STATUS', ) minimal_node_with_servers = dict( servers=[ {'name': 'fake_instance', 'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'} ], **minimal_node ) fake_compute_node = mock.Mock( service={'id': 123, 'host': 'test_hostname', 'disabled_reason': ''}, memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=None, # Don't let the mock return a value for servers. **minimal_node ) fake_detailed_node = mock.Mock( service={'id': 123, 'host': 'test_hostname', 'disabled_reason': ''}, memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, **minimal_node_with_servers) fake_instance = mock.Mock( id='ef500f7e-dac8-470f-960c-169486fce71b', name='fake_instance', flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, metadata={'hi': 'hello'}, tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b', ) setattr(fake_instance, 'OS-EXT-STS:vm_state', 'VM_STATE') setattr(fake_instance, 'name', 'fake_instance') # Returns the hypervisors with details (service) but no servers. m_nova_helper.get_compute_node_list.return_value = [fake_compute_node] # Returns the hypervisor with servers and details (service). m_nova_helper.get_compute_node_by_name.return_value = [ fake_detailed_node] # Returns the hypervisor with details (service) but no servers. 
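# NOTE: unlike the comment above suggests, the stub below feeds
# get_instance_list, i.e. the instances running on the compute node.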
m_nova_helper.get_instance_list.return_value = [fake_instance] m_config = mock.Mock() m_osc = mock.Mock() nova_cdmc = nova.NovaClusterDataModelCollector( config=m_config, osc=m_osc) nova_cdmc.get_audit_scope_handler([]) model = nova_cdmc.execute() compute_nodes = model.get_all_compute_nodes() instances = model.get_all_instances() self.assertEqual(1, len(compute_nodes)) self.assertEqual(1, len(instances)) node = list(compute_nodes.values())[0] instance = list(instances.values())[0] self.assertEqual(node.uuid, '160a0e7b-8b0b-4854-8257-9c71dff4efcc') self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b') memory_total = (node.memory-node.memory_mb_reserved)*node.memory_ratio self.assertEqual(node.memory_mb_capacity, memory_total) disk_total = (node.disk-node.disk_gb_reserved)*node.disk_ratio self.assertEqual(node.disk_gb_capacity, disk_total) vcpus_total = (node.vcpus-node.vcpu_reserved)*node.vcpu_ratio self.assertEqual(node.vcpu_capacity, vcpus_total) m_nova_helper.get_compute_node_by_name.assert_called_once_with( minimal_node['hypervisor_hostname'], servers=True, detailed=True) m_nova_helper.get_instance_list.assert_called_once_with( filters={'host': fake_compute_node.service['host']}, limit=1) class TestNovaModelBuilder(base.TestCase): @mock.patch.object(nova_helper, 'NovaHelper', mock.MagicMock()) def test_add_instance_node(self): model_builder = nova.NovaModelBuilder(osc=mock.MagicMock()) model_builder.model = mock.MagicMock() mock_node = mock.MagicMock() mock_host = mock_node.service["host"] inst1 = mock.MagicMock( id='ef500f7e-dac8-470f-960c-169486fce711', tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b') setattr(inst1, 'OS-EXT-STS:vm_state', 'deleted') setattr(inst1, 'name', 'instance1') inst2 = mock.MagicMock( id='ef500f7e-dac8-470f-960c-169486fce722', tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b') setattr(inst2, 'OS-EXT-STS:vm_state', 'active') setattr(inst2, 'name', 'instance2') mock_instances = [inst1, inst2] model_builder.nova_helper.get_instance_list.return_value = ( mock_instances) model_builder.add_instance_node(mock_node, mock_instances) # verify that when len(instances) <= 1000, limit == len(instance). model_builder.nova_helper.get_instance_list.assert_called_once_with( filters={'host': mock_host}, limit=2) fake_instance = model_builder._build_instance_node(inst2) model_builder.model.add_instance.assert_called_once_with( fake_instance) # verify that when len(instances) > 1000, limit == -1. 
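# (The 1000-entry threshold presumably matches the compute API page size:
# beyond it the builder asks nova_helper for an unpaginated listing,
# limit=-1, instead of sizing the request to len(instances).)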
mock_instance = mock.Mock() mock_instances = [mock_instance] * 1001 model_builder.add_instance_node(mock_node, mock_instances) model_builder.nova_helper.get_instance_list.assert_called_with( filters={'host': mock_host}, limit=-1) def test_check_model(self): """Initialize collector ModelBuilder and test check model""" m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) self.assertTrue(t_nova_cluster._check_model_scope(m_scope)) def test_check_model_update_false(self): """Initialize check model with multiple identical scopes The second check_model call should return False as the models are the same """ m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) self.assertTrue(t_nova_cluster._check_model_scope(m_scope)) self.assertFalse(t_nova_cluster._check_model_scope(m_scope)) def test_check_model_update_true(self): """Initialize check model with multiple different scopes Since the models differ both should return True for the update flag """ m_scope_one = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] m_scope_two = [{"compute": [ {"host_aggregates": [{"id": 2}]}, {"availability_zones": [{"name": "av_b"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) self.assertTrue(t_nova_cluster._check_model_scope(m_scope_one)) self.assertTrue(t_nova_cluster._check_model_scope(m_scope_two)) def test_merge_compute_scope(self): """Test that two compute scopes are merged without duplicates""" m_scope_one = [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ] m_scope_two = [ {"host_aggregates": [{"id": 4}]}, {"availability_zones": [{"name": "av_b"}]} ] reference = {'availability_zones': [{'name': 'av_a'}, {'name': 'av_b'}], 'host_aggregates': [{'id': 5}, {'id': 4}]} t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster._merge_compute_scope(m_scope_one) t_nova_cluster._merge_compute_scope(m_scope_two) self.assertEqual(reference, t_nova_cluster.model_scope) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_aggregates(self, m_nova): """Test that hosts of the scoped host aggregate are collected""" m_nova.return_value.get_aggregate_list.return_value = \ [mock.Mock(id=1, name='example'), mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])] m_nova.return_value.get_compute_node_by_name.return_value = False m_scope = [{'id': 5}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) result = set() t_nova_cluster._collect_aggregates(m_scope, result) self.assertEqual(set(['hostone', 'hosttwo']), result) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_aggregates_none(self, m_nova): """Test collect_aggregates with host_aggregates None""" result = set() t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster._collect_aggregates(None, result) self.assertEqual(set(), result) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_zones(self, m_nova): """Test that hosts in the scoped availability zone are collected""" m_nova.return_value.get_service_list.return_value = \ [mock.Mock(zone='av_b'), mock.Mock(zone='av_a', host='hostone')] m_nova.return_value.get_compute_node_by_name.return_value = False m_scope = [{'name': 'av_a'}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) result = set() t_nova_cluster._collect_zones(m_scope, result) self.assertEqual(set(['hostone']), result) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_zones_none(self, m_nova): """Test collect_zones with availability_zones None""" result = set()
t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster._collect_zones(None, result) self.assertEqual(set(), result) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_add_physical_layer(self, m_nova, m_placement): """Ensure all three steps of the physical layer are fully executed First the return values for get_aggregate_list and get_service_list are mocked. These return 3 hosts of which hostone is returned by both the aggregate and service call. This will help verify the elimination of duplicates. The scope is set up so that only hostone and hosttwo should remain. There will be 2 simulated compute nodes and 2 associated instances. These will be returned by their matching calls in nova helper. The calls to get_compute_node_by_name and get_instance_list are asserted to verify the correct operation of add_physical_layer. """ mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = None m_placement.return_value = mock_placement m_nova.return_value.get_aggregate_list.return_value = \ [mock.Mock(id=1, name='example'), mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])] m_nova.return_value.get_service_list.return_value = \ [mock.Mock(zone='av_b', host='hostthree'), mock.Mock(zone='av_a', host='hostone')] compute_node_one = mock.Mock( id='796fee99-65dd-4262-aa-fd2a1143faa6', hypervisor_hostname='hostone', hypervisor_type='QEMU', state='TEST_STATE', status='TEST_STATUS', memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=[ {'name': 'fake_instance', 'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'} ], service={'id': 123, 'host': 'hostone', 'disabled_reason': ''}, ) compute_node_two = mock.Mock( id='756fef99-65dd-4262-aa-fd2a1143faa6', hypervisor_hostname='hosttwo', hypervisor_type='QEMU', state='TEST_STATE', status='TEST_STATUS', memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=[ {'name': 'fake_instance2', 'uuid': 'ef500f7e-dac8-47f0-960c-169486fce71b'} ], service={'id': 123, 'host': 'hosttwo', 'disabled_reason': ''}, ) m_nova.return_value.get_compute_node_by_name.side_effect = [ [compute_node_one], [compute_node_two] ] fake_instance_one = mock.Mock( id='796fee99-65dd-4262-aa-fd2a1143faa6', name='fake_instance', flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, metadata={'hi': 'hello'}, tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b', ) fake_instance_two = mock.Mock( id='ef500f7e-dac8-47f0-960c-169486fce71b', name='fake_instance2', flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, metadata={'hi': 'hello'}, tenant_id='756fef99-65dd-4262-aa-fd2a1143faa6', ) m_nova.return_value.get_instance_list.side_effect = [ [fake_instance_one], [fake_instance_two] ] m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster.execute(m_scope) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hostone', servers=True, detailed=True) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hosttwo', servers=True, detailed=True) self.assertEqual( m_nova.return_value.get_compute_node_by_name.call_count, 2) m_nova.return_value.get_instance_list.assert_any_call( filters={'host': 'hostone'}, limit=1) m_nova.return_value.get_instance_list.assert_any_call(
filters={'host': 'hosttwo'}, limit=1) self.assertEqual( m_nova.return_value.get_instance_list.call_count, 2) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_add_physical_layer_with_baremetal_node(self, m_nova, m_placement_helper): """""" mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = None m_placement_helper.return_value = mock_placement m_nova.return_value.get_aggregate_list.return_value = \ [mock.Mock(id=1, name='example'), mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])] m_nova.return_value.get_service_list.return_value = \ [mock.Mock(zone='av_b', host='hostthree'), mock.Mock(zone='av_a', host='hostone')] compute_node = mock.Mock( id='796fee99-65dd-4262-aa-fd2a1143faa6', hypervisor_hostname='hostone', hypervisor_type='QEMU', state='TEST_STATE', status='TEST_STATUS', memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=[ {'name': 'fake_instance', 'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'} ], service={'id': 123, 'host': 'hostone', 'disabled_reason': ''}, ) baremetal_node = mock.Mock( id='5f2d1b3d-4099-4623-b9-05148aefd6cb', hypervisor_hostname='hosttwo', hypervisor_type='ironic', state='TEST_STATE', status='TEST_STATUS', ) m_nova.return_value.get_compute_node_by_name.side_effect = [ [compute_node], [baremetal_node]] m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) model = t_nova_cluster.execute(m_scope) compute_nodes = model.get_all_compute_nodes() self.assertEqual(1, len(compute_nodes)) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hostone', servers=True, detailed=True) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hosttwo', servers=True, detailed=True) self.assertEqual( m_nova.return_value.get_compute_node_by_name.call_count, 2) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/0000775000175000017500000000000000000000000025113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/__init__.py0000664000175000017500000000000000000000000027212 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/grafana_translators/0000775000175000017500000000000000000000000031146 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/grafana_translators/__init__.py0000664000175000017500000000000000000000000033245 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/grafana_translators/test_base.py0000664000175000017500000000715600000000000033502 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne 
Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.decision_engine.datasources.grafana_translator import \ base as base_translator from watcher.tests import base CONF = cfg.CONF LOG = log.getLogger(__name__) class TestGrafanaTranslatorBase(base.BaseTestCase): """Base class for all GrafanaTranslator test classes Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. """ def setUp(self): super(TestGrafanaTranslatorBase, self).setUp() """Basic valid reference data""" self.reference_data = { 'metric': 'host_cpu_usage', 'db': 'production', 'attribute': 'hostname', 'query': 'SHOW all_base FROM belong_to_us', 'resource': mock.Mock(hostname='hyperion'), 'resource_type': 'compute_node', 'period': '120', 'aggregate': 'mean', 'granularity': None } class TestBaseGrafanaTranslator(TestGrafanaTranslatorBase): """Test the GrafanaTranslator base class Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. 
""" def setUp(self): super(TestBaseGrafanaTranslator, self).setUp() def test_validate_data(self): """Initialize InfluxDBGrafanaTranslator and check data validation""" t_base_translator = base_translator.BaseGrafanaTranslator( data=self.reference_data) self.assertIsInstance(t_base_translator, base_translator.BaseGrafanaTranslator) def test_validate_data_error(self): """Initialize InfluxDBGrafanaTranslator and check data validation""" self.assertRaises(exception.InvalidParameter, base_translator.BaseGrafanaTranslator, data=[]) def test_extract_attribute(self): """Test that an attribute can be extracted from an object""" m_object = mock.Mock(hostname='test') t_base_translator = base_translator.BaseGrafanaTranslator( data=self.reference_data) self.assertEqual('test', t_base_translator._extract_attribute( m_object, 'hostname')) def test_extract_attribute_error(self): """Test error on attempt to extract none existing attribute""" m_object = mock.Mock(hostname='test') m_object.test = mock.PropertyMock(side_effect=AttributeError) t_base_translator = base_translator.BaseGrafanaTranslator( data=self.reference_data) self.assertRaises(AttributeError, t_base_translator._extract_attribute( m_object, 'test')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/grafana_translators/test_influxdb.py0000664000175000017500000001327000000000000034375 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from unittest import mock from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.decision_engine.datasources.grafana_translator import influxdb from watcher.tests.decision_engine.datasources.grafana_translators import \ test_base CONF = cfg.CONF LOG = log.getLogger(__name__) class TestInfluxDBGrafanaTranslator(test_base.TestGrafanaTranslatorBase): """Test the InfluxDB gragana database translator Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. 
""" def setUp(self): super(TestInfluxDBGrafanaTranslator, self).setUp() self.p_conf = mock.patch.object( influxdb, 'CONF', new_callable=mock.PropertyMock) self.m_conf = self.p_conf.start() self.addCleanup(self.p_conf.stop) self.m_conf.grafana_translators.retention_periods = { 'one_day': 86400, 'one_week': 604800 } def test_retention_period_one_day(self): """Validate lowest retention period""" data = copy.copy(self.reference_data) data['query'] = "{4}" t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) params = t_influx.build_params() self.assertEqual(params['q'], 'one_day') def test_retention_period_one_week(self): """Validate incrementing retention periods""" data = copy.copy(self.reference_data) data['query'] = "{4}" data['period'] = 90000 t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) params = t_influx.build_params() self.assertEqual(params['q'], 'one_week') @mock.patch.object(influxdb, 'LOG') def test_retention_period_warning(self, m_log): """Validate retention period warning""" data = copy.copy(self.reference_data) data['query'] = "{4}" data['period'] = 650000 t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) params = t_influx.build_params() self.assertEqual(params['q'], 'one_week') m_log.warning.assert_called_once_with( "Longest retention period is to short for desired period") def test_build_params_granularity(self): """Validate build params granularity""" data = copy.copy(self.reference_data) data['granularity'] = None data['query'] = "{3}" t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) raw_results = { 'db': 'production', 'epoch': 'ms', 'q': '1' } # InfluxDB build_params should replace granularity None optional with 1 result = t_influx.build_params() self.assertEqual(raw_results, result) def test_build_params_order(self): """Validate order of build params""" data = copy.copy(self.reference_data) data['aggregate'] = 'count' # prevent having to deepcopy by keeping this value the same # this will access the value 'hyperion' from the mocked resource object data['attribute'] = 'hostname' data['period'] = 3 # because the period is only 3 the retention_period will be one_day data['granularity'] = 4 data['query'] = "{0}{1}{2}{3}{4}" t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) raw_results = "counthyperion34one_day" result = t_influx.build_params() self.assertEqual(raw_results, result['q']) def test_extract_results(self): """Validate proper result extraction""" t_influx = influxdb.InfluxDBGrafanaTranslator( data=self.reference_data) raw_results = "{ \"results\": [{ \"series\": [{ " \ "\"columns\": [\"time\",\"mean\"]," \ "\"values\": [[1552500855000, " \ "67.3550078657577]]}]}]}" # Structure of InfluxDB time series data # { "results": [{ # "statement_id": 0, # "series": [{ # "name": "cpu_percent", # "columns": [ # "time", # "mean" # ], # "values": [[ # 1552500855000, # 67.3550078657577 # ]] # }] # }]} self.assertEqual(t_influx.extract_result(raw_results), 67.3550078657577) def test_extract_results_error(self): """Validate error on missing results""" t_influx = influxdb.InfluxDBGrafanaTranslator( data=self.reference_data) raw_results = "{}" self.assertRaises(exception.NoSuchMetricForHost, t_influx.extract_result, raw_results) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/test_base.py0000664000175000017500000000452700000000000027446 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European 
Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_config import cfg from watcher.decision_engine.datasources import base as datasource from watcher.tests import base CONF = cfg.CONF class TestBaseDatasourceHelper(base.BaseTestCase): def test_query_retry(self): exc = Exception() method = mock.Mock() # first call will fail but second will succeed method.side_effect = [exc, True] # Max 2 attempts CONF.set_override("query_max_retries", 2, group='watcher_datasources') # Reduce sleep time to 0 CONF.set_override("query_timeout", 0, group='watcher_datasources') helper = datasource.DataSourceBase() helper.query_retry_reset = mock.Mock() self.assertTrue(helper.query_retry(f=method)) helper.query_retry_reset.assert_called_once_with(exc) def test_query_retry_exception(self): exc = Exception() method = mock.Mock() # only third call will succeed method.side_effect = [exc, exc, True] # Max 2 attempts CONF.set_override("query_max_retries", 2, group='watcher_datasources') # Reduce sleep time to 0 CONF.set_override("query_timeout", 0, group='watcher_datasources') helper = datasource.DataSourceBase() helper.query_retry_reset = mock.Mock() # Maximum number of retries exceeded query_retry should return None self.assertIsNone(helper.query_retry(f=method)) # query_retry_reset should be called twice helper.query_retry_reset.assert_has_calls( [mock.call(exc), mock.call(exc)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/test_gnocchi_helper.py0000664000175000017500000002122700000000000031501 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
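# --- Illustrative aside, not part of the upstream test file --------------
# test_gnocchi_statistic_aggregation below feeds a "rate:mean" measure of
# cumulative CPU time (nanoseconds consumed over one granularity window)
# and expects a percentage back. A sketch of that conversion, assuming the
# helper divides by wall time times vCPU count (the function name below is
# hypothetical; 10e+8 matches the test's own nanoseconds-per-second
# constant):
def _cpu_rate_to_percent(rate_ns, granularity, vcpus):
    # ns of CPU consumed / ns available across all vCPUs in the window
    return rate_ns / (granularity * 10e+8 * vcpus) * 100

# mirrors the mock_rate_measure arithmetic used in the test below
assert abs(_cpu_rate_to_percent(360 * 10e+8 * 2 * 5.5 / 100, 360, 2)
           - 5.5) < 1e-9
# --------------------------------------------------------------------------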
from datetime import datetime from unittest import mock from oslo_config import cfg from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import gnocchi as gnocchi_helper from watcher.tests import base CONF = cfg.CONF @mock.patch.object(clients.OpenStackClients, 'gnocchi') class TestGnocchiHelper(base.BaseTestCase): def setUp(self): super(TestGnocchiHelper, self).setUp() self.osc_mock = mock.Mock() self.helper = gnocchi_helper.GnocchiHelper(osc=self.osc_mock) stat_agg_patcher = mock.patch.object( self.helper, 'statistic_aggregation', spec=gnocchi_helper.GnocchiHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) def test_gnocchi_statistic_aggregation(self, mock_gnocchi): vcpus = 2 mock_instance = mock.Mock( id='16a86790-327a-45f9-bc82-45839f062fdc', vcpus=vcpus) gnocchi = mock.MagicMock() # cpu time rate of change (ns) mock_rate_measure = 360 * 10e+8 * vcpus * 5.5 / 100 expected_result = 5.5 expected_measures = [ ["2017-02-02T09:00:00.000000", 360, mock_rate_measure]] gnocchi.metric.get_measures.return_value = expected_measures mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() result = helper.statistic_aggregation( resource=mock_instance, resource_type='instance', meter_name='instance_cpu_usage', period=300, granularity=360, aggregate='mean', ) self.assertEqual(expected_result, result) gnocchi.metric.get_measures.assert_called_once_with( metric="cpu", start=mock.ANY, stop=mock.ANY, resource_id=mock_instance.uuid, granularity=360, aggregation="rate:mean") def test_gnocchi_statistic_series(self, mock_gnocchi): gnocchi = mock.MagicMock() expected_result = { "2017-02-02T09:00:00.000000": 5.5, "2017-02-02T09:03:60.000000": 5.8 } expected_measures = [ ["2017-02-02T09:00:00.000000", 360, 5.5], ["2017-02-02T09:03:60.000000", 360, 5.8] ] gnocchi.metric.get_measures.return_value = expected_measures mock_gnocchi.return_value = gnocchi start = datetime(year=2017, month=2, day=2, hour=9, minute=0) end = datetime(year=2017, month=2, day=2, hour=9, minute=4) helper = gnocchi_helper.GnocchiHelper() result = helper.statistic_series( resource=mock.Mock(id='16a86790-327a-45f9-bc82-45839f062fdc'), resource_type='instance', meter_name='instance_cpu_usage', start_time=start, end_time=end, granularity=360, ) self.assertEqual(expected_result, result) def test_statistic_aggregation_metric_unavailable(self, mock_gnocchi): helper = gnocchi_helper.GnocchiHelper() # invalidate instance_cpu_usage in metric map original_metric_value = helper.METRIC_MAP.get('instance_cpu_usage') helper.METRIC_MAP.update( instance_cpu_usage=None ) self.assertRaises( exception.MetricNotAvailable, helper.statistic_aggregation, resource=mock.Mock(id='16a86790-327a-45f9-bc82-45839f062fdc'), resource_type='instance', meter_name='instance_cpu_usage', period=300, granularity=360, aggregate='mean', ) # restore the metric map as it is a static attribute that does not get # restored between unit tests! 
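# --- Illustrative aside, not part of the upstream test file --------------
# mock.patch.dict can make this kind of restore automatic even when the
# assertion fails; a self-contained demonstration on a throwaway dict:
import unittest.mock as _mock_demo
_demo_map = {'instance_cpu_usage': 'cpu'}
with _mock_demo.patch.dict(_demo_map, {'instance_cpu_usage': None}):
    assert _demo_map['instance_cpu_usage'] is None  # invalidated in scope
assert _demo_map['instance_cpu_usage'] == 'cpu'  # restored on exit
# The manual restore used by this test follows. ---------------------------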
helper.METRIC_MAP.update( instance_cpu_usage=original_metric_value ) def test_get_host_cpu_usage(self, mock_gnocchi): self.helper.get_host_cpu_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_cpu_usage', 600, 'mean', 300) def test_get_host_ram_usage(self, mock_gnocchi): self.helper.get_host_ram_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_ram_usage', 600, 'mean', 300) def test_get_host_outlet_temperature(self, mock_gnocchi): self.helper.get_host_outlet_temp('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_outlet_temp', 600, 'mean', 300) def test_get_host_inlet_temperature(self, mock_gnocchi): self.helper.get_host_inlet_temp('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_inlet_temp', 600, 'mean', 300) def test_get_host_airflow(self, mock_gnocchi): self.helper.get_host_airflow('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_airflow', 600, 'mean', 300) def test_get_host_power(self, mock_gnocchi): self.helper.get_host_power('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_power', 600, 'mean', 300) def test_get_instance_cpu_usage(self, mock_gnocchi): self.helper.get_instance_cpu_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_cpu_usage', 600, 'mean', 300) def test_get_instance_memory_usage(self, mock_gnocchi): self.helper.get_instance_ram_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_ram_usage', 600, 'mean', 300) def test_get_instance_ram_allocated(self, mock_gnocchi): self.helper.get_instance_ram_allocated('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_ram_allocated', 600, 'mean', 300) def test_get_instance_root_disk_allocated(self, mock_gnocchi): self.helper.get_instance_root_disk_size('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_root_disk_size', 600, 'mean', 300) def test_gnocchi_check_availability(self, mock_gnocchi): gnocchi = mock.MagicMock() gnocchi.status.get.return_value = True mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() result = helper.check_availability() self.assertEqual('available', result) def test_gnocchi_check_availability_with_failure(self, mock_gnocchi): cfg.CONF.set_override("query_max_retries", 1, group='watcher_datasources') gnocchi = mock.MagicMock() gnocchi.status.get.side_effect = Exception() mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() self.assertEqual('not available', helper.check_availability()) def test_gnocchi_list_metrics(self, mock_gnocchi): gnocchi = mock.MagicMock() metrics = [{"name": "metric1"}, {"name": "metric2"}] expected_metrics = set(["metric1", "metric2"]) gnocchi.metric.list.return_value = metrics mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() result = helper.list_metrics() self.assertEqual(expected_metrics, result) def test_gnocchi_list_metrics_with_failure(self, mock_gnocchi): cfg.CONF.set_override("query_max_retries", 1, group='watcher_datasources') gnocchi = mock.MagicMock() gnocchi.metric.list.side_effect = 
Exception() mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() self.assertFalse(helper.list_metrics()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/test_grafana_helper.py0000664000175000017500000002712200000000000031466 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_config import cfg from oslo_log import log from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import grafana from watcher.tests import base from http import HTTPStatus import requests CONF = cfg.CONF LOG = log.getLogger(__name__) @mock.patch.object(clients.OpenStackClients, 'nova', mock.Mock()) class TestGrafana(base.BaseTestCase): """Test the GrafanaHelper datasource Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. """ def setUp(self): super(TestGrafana, self).setUp() self.p_conf = mock.patch.object( grafana, 'CONF', new_callable=mock.PropertyMock) self.m_conf = self.p_conf.start() self.addCleanup(self.p_conf.stop) self.m_conf.grafana_client.token = \ "eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==" self.m_conf.grafana_client.base_url = "https://grafana.proxy/api/" self.m_conf.grafana_client.project_id_map = {'host_cpu_usage': 7221} self.m_conf.grafana_client.database_map = \ {'host_cpu_usage': 'mock_db'} self.m_conf.grafana_client.attribute_map = \ {'host_cpu_usage': 'hostname'} self.m_conf.grafana_client.translator_map = \ {'host_cpu_usage': 'influxdb'} self.m_conf.grafana_client.query_map = \ {'host_cpu_usage': 'SELECT 100-{0}("{0}_value") FROM {3}.' 
'cpu_percent WHERE ("host" =~ /^{1}$/ AND ' '"type_instance" =~/^idle$/ AND time > ' '(now()-{2}m)'} self.m_grafana = grafana.GrafanaHelper(osc=mock.Mock()) stat_agg_patcher = mock.patch.object( self.m_grafana, 'statistic_aggregation', spec=grafana.GrafanaHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) self.m_compute_node = mock.Mock( id='16a86790-327a-45f9-bc82-45839f062fdc', hostname='example.hostname.ch' ) self.m_instance = mock.Mock( id='73b1ff78-aca7-404f-ac43-3ed16c1fa555', human_id='example.hostname' ) def test_configured(self): """Initialize GrafanaHelper and check if configured is true""" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertTrue(t_grafana.configured) def test_configured_error(self): """Butcher the required configuration and test if configured is false """ self.m_conf.grafana_client.base_url = "" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertFalse(t_grafana.configured) def test_configured_raise_error(self): """Test raising error when using an improperly configured GrafanaHelper Ensure that the _get_metric method raises errors if the metric is missing from the map """ # Clear the METRIC_MAP of Grafana since it is a static variable that # other tests might have set before this test runs. grafana.GrafanaHelper.METRIC_MAP = {} self.m_conf.grafana_client.base_url = "" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertFalse(t_grafana.configured) self.assertEqual({}, t_grafana.METRIC_MAP) self.assertRaises( exception.MetricNotAvailable, t_grafana.get_host_cpu_usage, self.m_compute_node ) @mock.patch.object(requests, 'get') def test_request_raise_error(self, m_request): """Test the result when the response status code indicates a problem Ensure that the _request method raises errors if the response indicates problems. """ m_request.return_value = mock.Mock(status_code=HTTPStatus.NOT_FOUND) t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertIsNone(t_grafana.get_host_cpu_usage(self.m_compute_node)) def test_no_metric_raise_error(self): """Test raising error when the specified meter does not exist""" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertRaises(exception.MetricNotAvailable, t_grafana.statistic_aggregation, self.m_compute_node, 'none existing meter', 60) @mock.patch.object(grafana.GrafanaHelper, '_request') def test_get_metric_raise_error(self, m_request): """Test raising error when the endpoint cannot deliver data for the metric """ m_request.return_value.content = "{}" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertRaises(exception.NoSuchMetricForHost, t_grafana.get_host_cpu_usage, self.m_compute_node, 60) def test_metric_builder(self): """Creates valid and invalid sets of configuration for metrics Ensures that a valid metric entry can be configured even if multiple invalid configurations exist for other metrics.
""" self.m_conf.grafana_client.project_id_map = { 'host_cpu_usage': 7221, 'host_ram_usage': 7221, 'instance_ram_allocated': 7221, } self.m_conf.grafana_client.database_map = { 'host_cpu_usage': 'mock_db', 'instance_cpu_usage': 'mock_db', 'instance_ram_allocated': 'mock_db', } self.m_conf.grafana_client.attribute_map = { 'host_cpu_usage': 'hostname', 'host_power': 'hostname', 'instance_ram_allocated': 'human_id', } self.m_conf.grafana_client.translator_map = { 'host_cpu_usage': 'influxdb', 'host_inlet_temp': 'influxdb', # validate that invalid entries don't get added 'instance_ram_usage': 'dummy', 'instance_ram_allocated': 'influxdb', } self.m_conf.grafana_client.query_map = { 'host_cpu_usage': 'SHOW SERIES', 'instance_ram_usage': 'SHOW SERIES', 'instance_ram_allocated': 'SHOW SERIES', } expected_result = { 'host_cpu_usage': { 'db': 'mock_db', 'project': 7221, 'attribute': 'hostname', 'translator': 'influxdb', 'query': 'SHOW SERIES'}, 'instance_ram_allocated': { 'db': 'mock_db', 'project': 7221, 'attribute': 'human_id', 'translator': 'influxdb', 'query': 'SHOW SERIES'}, } t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertEqual(t_grafana.METRIC_MAP, expected_result) @mock.patch.object(grafana.GrafanaHelper, '_request') def test_statistic_aggregation(self, m_request): m_request.return_value.content = "{ \"results\": [{ \"series\": [{ " \ "\"columns\": [\"time\",\"mean\"]," \ "\"values\": [[1552500855000, " \ "67.3550078657577]]}]}]}" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) result = t_grafana.statistic_aggregation( self.m_compute_node, 'compute_node', 'host_cpu_usage', 60) self.assertEqual(result, 67.3550078657577) def test_get_host_cpu_usage(self): self.m_grafana.get_host_cpu_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_cpu_usage', 60, 'min', 15) def test_get_host_ram_usage(self): self.m_grafana.get_host_ram_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_ram_usage', 60, 'min', 15) def test_get_host_outlet_temperature(self): self.m_grafana.get_host_outlet_temp(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_outlet_temp', 60, 'min', 15) def test_get_host_inlet_temperature(self): self.m_grafana.get_host_inlet_temp(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_inlet_temp', 60, 'min', 15) def test_get_host_airflow(self): self.m_grafana.get_host_airflow(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_airflow', 60, 'min', 15) def test_get_host_power(self): self.m_grafana.get_host_power(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_power', 60, 'min', 15) def test_get_instance_cpu_usage(self): self.m_grafana.get_instance_cpu_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_cpu_usage', 60, 'min', 15) def test_get_instance_ram_usage(self): self.m_grafana.get_instance_ram_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_ram_usage', 60, 'min', 15) def test_get_instance_ram_allocated(self): 
self.m_grafana.get_instance_ram_allocated(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_ram_allocated', 60, 'min', 15) def test_get_instance_l3_cache_usage(self): self.m_grafana.get_instance_l3_cache_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_l3_cache_usage', 60, 'min', 15) def test_get_instance_root_disk_allocated(self): self.m_grafana.get_instance_root_disk_size(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_root_disk_size', 60, 'min', 15) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/test_manager.py0000664000175000017500000001515300000000000030143 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from unittest.mock import MagicMock from watcher.common import exception from watcher.decision_engine.datasources import gnocchi from watcher.decision_engine.datasources import grafana from watcher.decision_engine.datasources import manager as ds_manager from watcher.decision_engine.datasources import monasca from watcher.tests import base class TestDataSourceManager(base.BaseTestCase): def _dsm_config(self, **kwargs): dss = ['gnocchi', 'monasca'] opts = dict(datasources=dss, metric_map_path=None) opts.update(kwargs) return MagicMock(**opts) def _dsm(self, **kwargs): opts = dict(config=self._dsm_config(), osc=mock.MagicMock()) opts.update(kwargs) return ds_manager.DataSourceManager(**opts) def test_metric_file_path_not_exists(self): manager = self._dsm() expected = ds_manager.DataSourceManager.metric_map actual = manager.metric_map self.assertEqual(expected, actual) self.assertEqual({}, manager.load_metric_map('/nope/nope/nope.yaml')) def test_metric_file_metric_override(self): path = 'watcher.decision_engine.datasources.manager.' \ 'DataSourceManager.load_metric_map' retval = { monasca.MonascaHelper.NAME: {"host_airflow": "host_fnspid"} } with mock.patch(path, return_value=retval): dsmcfg = self._dsm_config(datasources=['monasca']) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_airflow']) self.assertEqual("host_fnspid", backend.METRIC_MAP['host_airflow']) @mock.patch.object(grafana, 'CONF') def test_metric_file_metric_override_grafana(self, m_config): """Grafana requires a different structure in the metric map""" m_config.grafana_client.token = \ "eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==" m_config.grafana_client.base_url = "https://grafana.proxy/api/" path = 'watcher.decision_engine.datasources.manager.' 
\ 'DataSourceManager.load_metric_map' metric_map = { 'db': 'production_cloud', 'project': '7485', 'attribute': 'hostname', 'translator': 'influxdb', 'query': 'SHOW SERIES' } retval = { grafana.GrafanaHelper.NAME: {"host_airflow": metric_map} } with mock.patch(path, return_value=retval): dsmcfg = self._dsm_config(datasources=['grafana']) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_airflow']) self.assertEqual(metric_map, backend.METRIC_MAP['host_airflow']) def test_metric_file_invalid_ds(self): with mock.patch('yaml.safe_load') as mo: mo.return_value = {"newds": {"metric_one": "i_am_metric_one"}} mgr = self._dsm() self.assertNotIn('newds', mgr.metric_map.keys()) def test_get_backend(self): manager = self._dsm() backend = manager.get_backend(['host_cpu_usage', 'instance_cpu_usage']) self.assertEqual(backend, manager.gnocchi) def test_get_backend_order(self): dss = ['monasca', 'gnocchi'] dsmcfg = self._dsm_config(datasources=dss) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_cpu_usage', 'instance_cpu_usage']) self.assertEqual(backend, manager.monasca) def test_get_backend_wrong_metric(self): manager = self._dsm() self.assertRaises(exception.MetricNotAvailable, manager.get_backend, ['host_cpu', 'instance_cpu_usage']) @mock.patch.object(gnocchi, 'GnocchiHelper') def test_get_backend_error_datasource(self, m_gnocchi): m_gnocchi.side_effect = exception.DataSourceNotAvailable manager = self._dsm() backend = manager.get_backend(['host_cpu_usage', 'instance_cpu_usage']) self.assertEqual(backend, manager.monasca) @mock.patch.object(grafana.GrafanaHelper, 'METRIC_MAP', {'host_cpu_usage': 'test'}) def test_get_backend_grafana(self): dss = ['grafana', 'gnocchi'] dsmcfg = self._dsm_config(datasources=dss) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_cpu_usage']) self.assertEqual(backend, manager.grafana) @mock.patch.object(grafana, 'CONF') def test_dynamic_metric_map_grafana(self, m_config): m_config.grafana_client.token = \ "eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==" m_config.grafana_client.base_url = "https://grafana.proxy/api/" m_config.grafana_client.project_id_map = {'host_cpu_usage': 7221} m_config.grafana_client.attribute_map = {'host_cpu_usage': 'hostname'} m_config.grafana_client.database_map = {'host_cpu_usage': 'mock_db'} m_config.grafana_client.translator_map = {'host_cpu_usage': 'influxdb'} m_config.grafana_client.query_map = { 'host_cpu_usage': 'SHOW SERIES' } dss = ['grafana', 'gnocchi'] dsmcfg = self._dsm_config(datasources=dss) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_cpu_usage']) self.assertEqual(backend, manager.grafana) def test_get_backend_no_datasources(self): dsmcfg = self._dsm_config(datasources=[]) manager = self._dsm(config=dsmcfg) self.assertRaises(exception.NoDatasourceAvailable, manager.get_backend, ['host_cpu_usage', 'instance_cpu_usage']) dsmcfg = self._dsm_config(datasources=None) manager = self._dsm(config=dsmcfg) self.assertRaises(exception.NoDatasourceAvailable, manager.get_backend, ['host_cpu_usage', 'instance_cpu_usage']) def test_get_backend_no_metrics(self): manager = self._dsm() self.assertRaises(exception.InvalidParameter, manager.get_backend, []) self.assertRaises(exception.InvalidParameter, manager.get_backend, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/watcher/tests/decision_engine/datasources/test_monasca_helper.py0000664000175000017500000001310100000000000031500 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime from unittest import mock from oslo_config import cfg from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import monasca as monasca_helper from watcher.tests import base CONF = cfg.CONF @mock.patch.object(clients.OpenStackClients, 'monasca') class TestMonascaHelper(base.BaseTestCase): def setUp(self): super(TestMonascaHelper, self).setUp() self.osc_mock = mock.Mock() self.helper = monasca_helper.MonascaHelper(osc=self.osc_mock) stat_agg_patcher = mock.patch.object( self.helper, 'statistic_aggregation', spec=monasca_helper.MonascaHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) def test_monasca_statistic_aggregation(self, mock_monasca): monasca = mock.MagicMock() expected_stat = [{ 'columns': ['timestamp', 'avg'], 'dimensions': { 'hostname': 'rdev-indeedsrv001', 'service': 'monasca'}, 'id': '0', 'name': 'cpu.percent', 'statistics': [ ['2016-07-29T12:45:00Z', 0.0], ['2016-07-29T12:50:00Z', 0.9], ['2016-07-29T12:55:00Z', 0.9]]}] monasca.metrics.list_statistics.return_value = expected_stat mock_monasca.return_value = monasca helper = monasca_helper.MonascaHelper() result = helper.statistic_aggregation( resource=mock.Mock(id='NODE_UUID'), resource_type='compute_node', meter_name='host_cpu_usage', period=7200, granularity=300, aggregate='mean', ) self.assertEqual(0.6, result) def test_monasca_statistic_series(self, mock_monasca): monasca = mock.MagicMock() expected_stat = [{ 'columns': ['timestamp', 'avg'], 'dimensions': { 'hostname': 'rdev-indeedsrv001', 'service': 'monasca'}, 'id': '0', 'name': 'cpu.percent', 'statistics': [ ['2016-07-29T12:45:00Z', 0.0], ['2016-07-29T12:50:00Z', 0.9], ['2016-07-29T12:55:00Z', 0.9]]}] expected_result = { '2016-07-29T12:45:00Z': 0.0, '2016-07-29T12:50:00Z': 0.9, '2016-07-29T12:55:00Z': 0.9, } monasca.metrics.list_statistics.return_value = expected_stat mock_monasca.return_value = monasca start = datetime(year=2016, month=7, day=29, hour=12, minute=45) end = datetime(year=2016, month=7, day=29, hour=12, minute=55) helper = monasca_helper.MonascaHelper() result = helper.statistic_series( resource=mock.Mock(id='NODE_UUID'), resource_type='compute_node', meter_name='host_cpu_usage', start_time=start, end_time=end, granularity=300, ) self.assertEqual(expected_result, result) def test_statistic_aggregation_metric_unavailable(self, mock_monasca): helper = monasca_helper.MonascaHelper() # invalidate host_cpu_usage in metric map original_metric_value = helper.METRIC_MAP.get('host_cpu_usage') helper.METRIC_MAP.update( host_cpu_usage=None ) self.assertRaises( exception.MetricNotAvailable, helper.statistic_aggregation, resource=mock.Mock(id='NODE_UUID'), 
resource_type='compute_node', meter_name='host_cpu_usage', period=7200, granularity=300, aggregate='mean', ) # restore the metric map as it is a static attribute that does not get # restored between unit tests! helper.METRIC_MAP.update( host_cpu_usage=original_metric_value ) def test_check_availability(self, mock_monasca): monasca = mock.MagicMock() monasca.metrics.list.return_value = True mock_monasca.return_value = monasca helper = monasca_helper.MonascaHelper() result = helper.check_availability() self.assertEqual('available', result) def test_check_availability_with_failure(self, mock_monasca): monasca = mock.MagicMock() monasca.metrics.list.side_effect = Exception() mock_monasca.return_value = monasca helper = monasca_helper.MonascaHelper() self.assertEqual('not available', helper.check_availability()) def test_get_host_cpu_usage(self, mock_monasca): self.mock_aggregation.return_value = 0.6 node = mock.Mock(id='compute1') cpu_usage = self.helper.get_host_cpu_usage(node, 600, 'mean') self.assertEqual(0.6, cpu_usage) def test_get_instance_cpu_usage(self, mock_monasca): self.mock_aggregation.return_value = 0.6 node = mock.Mock(id='vm1') cpu_usage = self.helper.get_instance_cpu_usage(node, 600, 'mean') self.assertEqual(0.6, cpu_usage) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/datasources/test_prometheus_helper.py0000664000175000017500000006721100000000000032265 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
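# --- Illustrative aside, not part of the upstream test file --------------
# Several tests below assert exact PromQL strings for host CPU usage. A
# minimal sketch of how such a query can be assembled (the function name
# here is hypothetical; the real builder exercised by the tests is
# PrometheusHelper._build_prometheus_query):
def _node_cpu_query(aggregate, instance, period):
    # CPU usage % = 100 - idle %, aggregated per scrape instance
    return ("100 - ({agg} by (instance)(rate(node_cpu_seconds_total"
            "{{mode='idle',instance='{inst}'}}[{p}s])) * 100)").format(
                agg=aggregate, inst=instance, p=period)

assert _node_cpu_query('avg', '10.0.1.2:9100', 300) == (
    "100 - (avg by (instance)(rate(node_cpu_seconds_total"
    "{mode='idle',instance='10.0.1.2:9100'}[300s])) * 100)")
# --------------------------------------------------------------------------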
# from unittest import mock from observabilityclient import prometheus_client from oslo_config import cfg from watcher.common import exception from watcher.decision_engine.datasources import prometheus as prometheus_helper from watcher.tests import base class TestPrometheusHelper(base.BaseTestCase): def setUp(self): super(TestPrometheusHelper, self).setUp() with mock.patch.object( prometheus_client.PrometheusAPIClient, '_get', return_value={'data': {'activeTargets': [ {'labels': { 'fqdn': 'marios-env.controlplane.domain', 'instance': '10.0.1.2:9100', 'job': 'node', }}, {'labels': { 'fqdn': 'marios-env-again.controlplane.domain', 'instance': 'localhost:9100', 'job': 'node' }} ]}}): cfg.CONF.prometheus_client.host = "foobarbaz" cfg.CONF.prometheus_client.port = "1234" self.helper = prometheus_helper.PrometheusHelper() stat_agg_patcher = mock.patch.object( self.helper, 'statistic_aggregation', spec=prometheus_helper.PrometheusHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) self.mock_instance = mock.Mock( uuid='uuid-0', memory=512, disk=2, vcpus=2) def test_unset_missing_prometheus_host(self): cfg.CONF.prometheus_client.port = '123' cfg.CONF.prometheus_client.host = None self.assertRaisesRegex( exception.MissingParameter, 'prometheus host and port must be ' 'set in watcher.conf', prometheus_helper.PrometheusHelper ) cfg.CONF.prometheus_client.host = '' self.assertRaisesRegex( exception.MissingParameter, 'prometheus host and port must be ' 'set in watcher.conf', prometheus_helper.PrometheusHelper ) def test_unset_missing_prometheus_port(self): cfg.CONF.prometheus_client.host = 'some.host.domain' cfg.CONF.prometheus_client.port = None self.assertRaisesRegex( exception.MissingParameter, 'prometheus host and port must be ' 'set in watcher.conf', prometheus_helper.PrometheusHelper ) cfg.CONF.prometheus_client.port = '' self.assertRaisesRegex( exception.MissingParameter, 'prometheus host and port must be ' 'set in watcher.conf', prometheus_helper.PrometheusHelper ) def test_invalid_prometheus_port(self): cfg.CONF.prometheus_client.host = "hostOK" cfg.CONF.prometheus_client.port = "123badPort" self.assertRaisesRegex( exception.InvalidParameter, "missing or invalid port number " "'123badPort'", prometheus_helper.PrometheusHelper ) cfg.CONF.prometheus_client.port = "123456" self.assertRaisesRegex( exception.InvalidParameter, "missing or invalid port number " "'123456'", prometheus_helper.PrometheusHelper ) def test_invalid_prometheus_host(self): cfg.CONF.prometheus_client.port = "123" cfg.CONF.prometheus_client.host = "-badhost" self.assertRaisesRegex( exception.InvalidParameter, "hostname '-badhost' " "failed regex match", prometheus_helper.PrometheusHelper ) too_long_hostname = ("a" * 256) cfg.CONF.prometheus_client.host = too_long_hostname self.assertRaisesRegex( exception.InvalidParameter, ("hostname is too long: " + "'" + too_long_hostname + "'"), prometheus_helper.PrometheusHelper ) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_statistic_aggregation(self, mock_prometheus_get, mock_prometheus_query): mock_node = mock.Mock( uuid='1234', hostname='marios-env.controlplane.domain') expected_cpu_usage = 3.2706140350701673 mock_prom_metric = mock.Mock( labels={'instance': '10.0.1.2:9100'}, timestamp=1731065985.408, value=expected_cpu_usage ) mock_prometheus_query.return_value = [mock_prom_metric] mock_prometheus_get.return_value 
= {'data': {'activeTargets': [ {'labels': { 'fqdn': 'marios-env.controlplane.domain', 'instance': '10.0.1.2:9100', 'job': 'node', }}]}} helper = prometheus_helper.PrometheusHelper() result = helper.statistic_aggregation( resource=mock_node, resource_type='compute_node', meter_name='host_cpu_usage', period=300, aggregate='mean', granularity=300, ) self.assertEqual(expected_cpu_usage, result) mock_prometheus_query.assert_called_once_with( "100 - (avg by (instance)(rate(node_cpu_seconds_total" "{mode='idle',instance='10.0.1.2:9100'}[300s])) * 100)") @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_get_instance_cpu_usage(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance expected_cpu_usage = 13.2706140350701673 mock_prom_metric = mock.Mock( labels={'resource': 'uuid-0'}, timestamp=1731065985.408, value=expected_cpu_usage ) mock_prometheus_query.return_value = [mock_prom_metric] helper = prometheus_helper.PrometheusHelper() cpu_usage = helper.get_instance_cpu_usage(mock_instance) self.assertIsInstance(cpu_usage, float) self.assertEqual(expected_cpu_usage, cpu_usage) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_get_instance_ram_usage(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance expected_ram_usage = 49.86 mock_prom_metric = mock.Mock( labels={'resource': 'uuid-0'}, timestamp=1731065985.408, value=expected_ram_usage ) mock_prometheus_query.return_value = [mock_prom_metric] helper = prometheus_helper.PrometheusHelper() ram_usage = helper.get_instance_ram_usage( mock_instance, period=222, aggregate="max", granularity=200) self.assertIsInstance(ram_usage, float) self.assertEqual(expected_ram_usage, ram_usage) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_get_instance_ram_allocated(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance helper = prometheus_helper.PrometheusHelper() ram_allocated = helper.get_instance_ram_allocated(mock_instance, period=222, aggregate="max") self.assertIsInstance(ram_allocated, float) self.assertEqual(512, ram_allocated) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_get_instance_root_disk_size(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance helper = prometheus_helper.PrometheusHelper() disk_size = helper.get_instance_root_disk_size(mock_instance, period=331, aggregate="avg") self.assertIsInstance(disk_size, float) self.assertEqual(2, disk_size) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_stt_agg_instance_cpu_usage(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance expected_cpu_usage = 13.2706140350701673 mock_prom_metric = mock.Mock( labels={'resource': 'uuid-0'}, timestamp=1731065985.408, value=expected_cpu_usage ) mock_prometheus_query.return_value = [mock_prom_metric] helper = prometheus_helper.PrometheusHelper() result_cpu = helper.statistic_aggregation( resource=mock_instance, resource_type='instance', meter_name='instance_cpu_usage', period=300, granularity=300, aggregate='mean', ) 
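# --- Illustrative aside, not part of the upstream test file --------------
# The instance CPU query asserted just below turns a rate of cumulative
# CPU time (ceilometer_cpu, in nanoseconds) into a percentage:
#   rate(...)       -> ns of CPU consumed per second
#   / 10e+8         -> CPU-seconds per second (10e+8 ns == 1 s)
#   * (100 / vcpus) -> percent of this instance's 2 vCPUs
#   clamp_max(100)  -> caps sampling artifacts at 100%
# The same arithmetic, checked numerically:
_demo_rate = 2 * 10e+8 * 13.2706140350701673 / 100  # ns/s for ~13.27%
assert abs(_demo_rate / 10e+8 * (100 / 2) - 13.2706140350701673) < 1e-9
# --------------------------------------------------------------------------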
self.assertEqual(expected_cpu_usage, result_cpu) self.assertIsInstance(result_cpu, float) mock_prometheus_query.assert_called_once_with( "clamp_max((avg by (instance)(rate(" "ceilometer_cpu{resource='uuid-0'}[300s]))" "/10e+8) *(100/2), 100)" ) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_stt_agg_instance_ram_usage(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance expected_ram_usage = 49.86 mock_prom_metric = mock.Mock( labels={'resource': 'uuid-0'}, timestamp=1731065985.408, value=expected_ram_usage ) mock_prometheus_query.return_value = [mock_prom_metric] helper = prometheus_helper.PrometheusHelper() result_ram_usage = helper.statistic_aggregation( resource=mock_instance, resource_type='instance', meter_name='instance_ram_usage', period=300, granularity=300, aggregate='mean', ) self.assertEqual(expected_ram_usage, result_ram_usage) self.assertIsInstance(result_ram_usage, float) mock_prometheus_query.assert_called_with( "avg_over_time(ceilometer_memory_usage{resource='uuid-0'}[300s])" ) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_stt_agg_instance_root_size(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance helper = prometheus_helper.PrometheusHelper() result_disk = helper.statistic_aggregation( resource=mock_instance, resource_type='instance', meter_name='instance_root_disk_size', period=300, granularity=300, aggregate='mean', ) self.assertEqual(2, result_disk) self.assertIsInstance(result_disk, float) @mock.patch.object(prometheus_client.PrometheusAPIClient, 'query') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_stt_agg_instance_ram_alloc(self, mock_prometheus_get, mock_prometheus_query): mock_instance = self.mock_instance helper = prometheus_helper.PrometheusHelper() result_memory = helper.statistic_aggregation( resource=mock_instance, resource_type='instance', meter_name='instance_ram_allocated', period=300, granularity=300, aggregate='mean', ) self.assertEqual(512, result_memory) self.assertIsInstance(result_memory, float) def test_statistic_aggregation_metric_unavailable(self): self.assertRaisesRegex( NotImplementedError, 'does not support statistic_series', self.helper.statistic_series ) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_list_metrics(self, mock_prometheus_get): expected_metrics = set( ['go_gc_duration_seconds', 'go_gc_duration_seconds_count', 'go_gc_duration_seconds_sum', 'go_goroutines',] ) mock_prometheus_get.return_value = { 'status': 'success', 'data': [ 'go_gc_duration_seconds', 'go_gc_duration_seconds_count', 'go_gc_duration_seconds_sum', 'go_goroutines', ] } result = self.helper.list_metrics() self.assertEqual(expected_metrics, result) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_list_metrics_error(self, mock_prometheus_get): mock_prometheus_get.side_effect = ( prometheus_client.PrometheusAPIClientError("nope")) result = self.helper.list_metrics() self.assertEqual(set(), result) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_check_availability(self, mock_prometheus_get): mock_prometheus_get.return_value = { 'status': 'success', 'data': { 'startTime': '2024-11-05T12:59:56.962333207Z', 'CWD': '/prometheus', 
'reloadConfigSuccess': True, 'lastConfigTime': '2024-11-05T12:59:56Z', 'corruptionCount': 0, 'goroutineCount': 30, 'GOMAXPROCS': 8, 'GOMEMLIMIT': 9223372036854775807, 'GOGC': '75', 'GODEBUG': '', 'storageRetention': '15d' } } result = self.helper.check_availability() self.assertEqual('available', result) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_check_availability_error(self, mock_prometheus_get): mock_prometheus_get.side_effect = ( prometheus_client.PrometheusAPIClientError("nope")) result = self.helper.check_availability() self.assertEqual('not available', result) def test_get_host_cpu_usage(self): cpu_use = self.helper.get_host_cpu_usage('someNode', 345, 'mean', 300) self.assertIsInstance(cpu_use, float) self.mock_aggregation.assert_called_once_with( 'someNode', 'compute_node', 'host_cpu_usage', period=345, granularity=300, aggregate='mean') def test_get_host_cpu_usage_none(self): self.mock_aggregation.return_value = None cpu_use = self.helper.get_host_cpu_usage('someNode', 345, 'mean', 300) self.assertIsNone(cpu_use) def test_get_host_cpu_usage_max(self): cpu_use = self.helper.get_host_cpu_usage('theNode', 223, 'max', 100) self.assertIsInstance(cpu_use, float) self.mock_aggregation.assert_called_once_with( 'theNode', 'compute_node', 'host_cpu_usage', period=223, granularity=100, aggregate='min') def test_get_host_cpu_usage_min(self): cpu_use = self.helper.get_host_cpu_usage('theNode', 223, 'min', 100) self.assertIsInstance(cpu_use, float) self.mock_aggregation.assert_called_once_with( 'theNode', 'compute_node', 'host_cpu_usage', period=223, granularity=100, aggregate='max') def test_get_host_ram_usage(self): ram_use = self.helper.get_host_ram_usage( 'anotherNode', 456, 'mean', 300) self.assertIsInstance(ram_use, float) self.mock_aggregation.assert_called_once_with( 'anotherNode', 'compute_node', 'host_ram_usage', period=456, granularity=300, aggregate='mean') def test_get_host_ram_usage_none(self): self.mock_aggregation.return_value = None ram_use = self.helper.get_host_ram_usage('NOPE', 234, 'mean', 567) self.assertIsNone(ram_use, float) self.mock_aggregation.assert_called() self.mock_aggregation.assert_called_once_with( 'NOPE', 'compute_node', 'host_ram_usage', period=234, granularity=567, aggregate='mean') def test_get_host_ram_usage_max(self): ram_use = self.helper.get_host_ram_usage( 'aNode', 456, 'max', 300) self.assertIsInstance(ram_use, float) self.mock_aggregation.assert_called_once_with( 'aNode', 'compute_node', 'host_ram_usage', period=456, granularity=300, aggregate='min') def test_get_host_ram_usage_min(self): ram_use = self.helper.get_host_ram_usage( 'aNode', 456, 'min', 300) self.assertIsInstance(ram_use, float) self.mock_aggregation.assert_called_once_with( 'aNode', 'compute_node', 'host_ram_usage', period=456, granularity=300, aggregate='max') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_build_prometheus_fqdn_host_instance_map( self, mock_prometheus_get): mock_prometheus_get.return_value = {'data': {'activeTargets': [ {'labels': { 'fqdn': 'foo.controlplane.domain', 'instance': '10.1.2.1:9100', 'job': 'node', }}, {'labels': { 'fqdn': 'bar.controlplane.domain', 'instance': '10.1.2.2:9100', 'job': 'node', }}, {'labels': { 'fqdn': 'baz.controlplane.domain', 'instance': '10.1.2.3:9100', 'job': 'node', }}, ]}} expected_fqdn_map = {'foo.controlplane.domain': '10.1.2.1:9100', 'bar.controlplane.domain': '10.1.2.2:9100', 'baz.controlplane.domain': '10.1.2.3:9100'} expected_host_map = {'foo': 
'10.1.2.1:9100', 'bar': '10.1.2.2:9100', 'baz': '10.1.2.3:9100'} helper = prometheus_helper.PrometheusHelper() self.assertEqual(helper.prometheus_fqdn_instance_map, expected_fqdn_map) self.assertEqual(helper.prometheus_host_instance_map, expected_host_map) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_missing_prometheus_fqdn_label(self, mock_prometheus_get): mock_prometheus_get.return_value = {'data': {'activeTargets': [ {'labels': { 'instance': '10.1.2.1:9100', 'job': 'node', }}, {'labels': { 'instance': '10.1.2.2:9100', 'job': 'node', }}, ]}} helper = prometheus_helper.PrometheusHelper() self.assertEqual({}, helper.prometheus_fqdn_instance_map) self.assertEqual({}, helper.prometheus_host_instance_map) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_using_hostnames_not_fqdn(self, mock_prometheus_get): mock_prometheus_get.return_value = {'data': {'activeTargets': [ {'labels': { 'fqdn': 'ena', 'instance': '10.1.2.1:9100', 'job': 'node', }}, {'labels': { 'fqdn': 'dyo', 'instance': '10.1.2.2:9100', 'job': 'node', }}, ]}} helper = prometheus_helper.PrometheusHelper() expected_fqdn_map = {'ena': '10.1.2.1:9100', 'dyo': '10.1.2.2:9100'} self.assertEqual( helper.prometheus_fqdn_instance_map, expected_fqdn_map) self.assertEqual({}, helper.prometheus_host_instance_map) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_override_prometheus_fqdn_label(self, mock_prometheus_get): mock_prometheus_get.return_value = {'data': {'activeTargets': [ {'labels': { 'custom_fqdn_label': 'foo.controlplane.domain', 'instance': '10.1.2.1:9100', 'job': 'node', }}, {'labels': { 'custom_fqdn_label': 'bar.controlplane.domain', 'instance': '10.1.2.2:9100', 'job': 'node', }}, ]}} expected_fqdn_map = {'foo.controlplane.domain': '10.1.2.1:9100', 'bar.controlplane.domain': '10.1.2.2:9100'} expected_host_map = {'foo': '10.1.2.1:9100', 'bar': '10.1.2.2:9100'} cfg.CONF.prometheus_client.fqdn_label = 'custom_fqdn_label' helper = prometheus_helper.PrometheusHelper() self.assertEqual(helper.prometheus_fqdn_instance_map, expected_fqdn_map) self.assertEqual(helper.prometheus_host_instance_map, expected_host_map) def test_resolve_prometheus_instance_label(self): expected_instance_label = '10.0.1.2:9100' result = self.helper._resolve_prometheus_instance_label( 'marios-env.controlplane.domain') self.assertEqual(result, expected_instance_label) result = self.helper._resolve_prometheus_instance_label( 'marios-env') self.assertEqual(result, expected_instance_label) @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_resolve_prometheus_instance_label_none(self, mock_prometheus_get): mock_prometheus_get.return_value = {'data': {'activeTargets': []}} result = self.helper._resolve_prometheus_instance_label('nope') self.assertIsNone(result) mock_prometheus_get.assert_called_once_with("targets?state=active") def test_build_prometheus_query_node_cpu_avg_agg(self): expected_query = ( "100 - (avg by (instance)(rate(node_cpu_seconds_total" "{mode='idle',instance='a_host'}[111s])) * 100)") result = self.helper._build_prometheus_query( 'avg', 'node_cpu_seconds_total', 'a_host', '111') self.assertEqual(result, expected_query) def test_build_prometheus_query_node_cpu_max_agg(self): expected_query = ( "100 - (max by (instance)(rate(node_cpu_seconds_total" "{mode='idle',instance='b_host'}[444s])) * 100)") result = self.helper._build_prometheus_query( 'max', 'node_cpu_seconds_total', 'b_host', '444') self.assertEqual(result, expected_query) def 
test_build_prometheus_query_node_memory_avg_agg(self): expected_query = ( "(node_memory_MemTotal_bytes{instance='c_host'} - avg_over_time" "(node_memory_MemAvailable_bytes{instance='c_host'}[555s])) " "/ 1024 / 1024") result = self.helper._build_prometheus_query( 'avg', 'node_memory_MemAvailable_bytes', 'c_host', '555') self.assertEqual(result, expected_query) def test_build_prometheus_query_node_memory_min_agg(self): expected_query = ( "(node_memory_MemTotal_bytes{instance='d_host'} - min_over_time" "(node_memory_MemAvailable_bytes{instance='d_host'}[222s])) " "/ 1024 / 1024") result = self.helper._build_prometheus_query( 'min', 'node_memory_MemAvailable_bytes', 'd_host', '222') self.assertEqual(result, expected_query) def test_build_prometheus_query_instance_memory_avg_agg(self): expected_query = ( "avg_over_time(ceilometer_memory_usage{resource='uuid-0'}[555s])" ) result = self.helper._build_prometheus_query( 'avg', 'ceilometer_memory_usage', 'uuid-0', '555') self.assertEqual(result, expected_query) def test_build_prometheus_query_instance_memory_min_agg(self): expected_query = ( "min_over_time(ceilometer_memory_usage{resource='uuid-0'}[222s])" ) result = self.helper._build_prometheus_query( 'min', 'ceilometer_memory_usage', 'uuid-0', '222') self.assertEqual(result, expected_query) def test_build_prometheus_query_instance_cpu_avg_agg(self): expected_query = ( "clamp_max((avg by (instance)(rate(" "ceilometer_cpu{resource='uuid-0'}[222s]))" "/10e+8) *(100/2), 100)" ) result = self.helper._build_prometheus_query( 'avg', 'ceilometer_cpu', 'uuid-0', '222', resource=self.mock_instance) self.assertEqual(result, expected_query) def test_build_prometheus_query_instance_cpu_max_agg(self): expected_query = ( "clamp_max((max by (instance)(rate(" "ceilometer_cpu{resource='uuid-0'}[555s]))" "/10e+8) *(100/4), 100)" ) mock_instance = mock.Mock( uuid='uuid-0', memory=512, disk=2, vcpus=4) result = self.helper._build_prometheus_query( 'max', 'ceilometer_cpu', 'uuid-0', '555', resource=mock_instance) self.assertEqual(result, expected_query) def test_build_prometheus_query_error(self): self.assertRaisesRegex( exception.InvalidParameter, 'Cannot process prometheus meter NOPE', self.helper._build_prometheus_query, 'min', 'NOPE', 'the_host', '222' ) self.assertRaisesRegex( exception.InvalidParameter, 'instance_label None, period 333', self.helper._build_prometheus_query, 'min', 'node_cpu_seconds_total', None, '333' ) def test_resolve_prometheus_aggregate_vanilla(self): result = self.helper._resolve_prometheus_aggregate('mean', 'foo') self.assertEqual(result, 'avg') result = self.helper._resolve_prometheus_aggregate('count', 'foo') self.assertEqual(result, 'avg') result = self.helper._resolve_prometheus_aggregate('max', 'foometric') self.assertEqual(result, 'max') result = self.helper._resolve_prometheus_aggregate('min', 'barmetric') self.assertEqual(result, 'min') def test_resolve_prometheus_aggregate_unknown(self): self.assertRaisesRegex( exception.InvalidParameter, 'Unknown Watcher aggregate NOPE.', self.helper._resolve_prometheus_aggregate, 'NOPE', 'some_meter') @mock.patch.object(prometheus_client.PrometheusAPIClient, '_get') def test_prometheus_query_custom_uuid_label(self, mock_prometheus_get): cfg.CONF.prometheus_client.instance_uuid_label = 'custom_uuid_label' expected_query = ( "clamp_max((max by (instance)" "(rate(ceilometer_cpu{custom_uuid_label='uuid-0'}[555s]))" "/10e+8) *(100/4), 100)" ) mock_instance = mock.Mock( uuid='uuid-0', memory=512, disk=2, vcpus=4) result = 
self.helper._build_prometheus_query( 'max', 'ceilometer_cpu', 'uuid-0', '555', resource=mock_instance) self.assertEqual(result, expected_query) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/event_consumer/0000775000175000017500000000000000000000000025632 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/event_consumer/__init__.py0000664000175000017500000000000000000000000027731 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/fake_goals.py0000664000175000017500000000423300000000000025245 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.goal import base as base_goal from watcher.decision_engine.goal.efficacy import base as efficacy_base from watcher.decision_engine.goal.efficacy import indicators from watcher.decision_engine.goal.efficacy import specs class FakeGoal(base_goal.Goal): NAME = NotImplemented DISPLAY_NAME = NotImplemented @classmethod def get_name(cls): return cls.NAME @classmethod def get_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_translatable_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class DummyIndicator(indicators.IndicatorSpecification): def __init__(self): super(DummyIndicator, self).__init__( name="dummy", description="Dummy indicator", unit="%", ) @property def schema(self): return { "type": "integer", "minimum": 0 } class DummySpec1(efficacy_base.EfficacySpecification): def get_indicators_specifications(self): return [DummyIndicator()] def get_global_efficacy_indicator(self, indicators_map): return None class FakeDummy1(FakeGoal): NAME = "dummy_1" DISPLAY_NAME = "Dummy 1" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return DummySpec1() class FakeDummy2(FakeGoal): NAME = "dummy_2" DISPLAY_NAME = "Dummy 2" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/fake_metal_helper.py0000664000175000017500000000321200000000000026575 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Cloudbase Solutions # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import uuid from watcher.common.metal_helper import constants as m_constants def get_mock_metal_node(node_id=None, power_state=m_constants.PowerState.ON, running_vms=0, hostname=None, compute_state='up'): node_id = node_id or str(uuid.uuid4()) # NOTE(lpetrut): the hostname is important for some of the tests, # which expect it to match the fake cluster model. hostname = hostname or "compute-" + str(uuid.uuid4()).split('-')[0] hypervisor_node_dict = { 'hypervisor_hostname': hostname, 'running_vms': running_vms, 'service': { 'host': hostname, }, 'state': compute_state, } hypervisor_node = mock.Mock(**hypervisor_node_dict) hypervisor_node.to_dict.return_value = hypervisor_node_dict node = mock.Mock() node.get_power_state.return_value = power_state node.get_id.return_value = node_id node.get_hypervisor_node.return_value = hypervisor_node return node ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/fake_strategies.py0000664000175000017500000000371400000000000026315 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
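# NOTE: illustrative sketch, not part of the original fixtures. It folds
# together the two steps exercised by the prometheus helper tests above:
# the public-API aggregate inversion (host CPU usage is derived from the
# inverse idle-time series, so a requested 'max' usage becomes a 'min'
# aggregate) and the PromQL templating. The function name and the folded
# layering are hypothetical; the real helper splits this across
# get_host_cpu_usage() and _build_prometheus_query().
def _sketch_node_cpu_usage_query(aggregate, instance_label, period_s):
    # usage = 100 - idle%, so the extreme aggregates flip on the idle series
    inverted = {'max': 'min', 'min': 'max'}.get(aggregate, aggregate)
    template = ("100 - (%s by (instance)(rate(node_cpu_seconds_total"
                "{mode='idle',instance='%s'}[%ss])) * 100)")
    return template % (inverted, instance_label, period_s)

# e.g. _sketch_node_cpu_usage_query('avg', 'a_host', 111) reproduces the
# query string asserted in test_build_prometheus_query_node_cpu_avg_agg.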
from oslo_config import cfg from watcher.decision_engine.strategy.strategies import base as base_strategy class FakeStrategy(base_strategy.BaseStrategy): NAME = NotImplemented DISPLAY_NAME = NotImplemented GOAL_NAME = NotImplemented @classmethod def get_name(cls): return cls.NAME @classmethod def get_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_translatable_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_goal_name(cls): return cls.GOAL_NAME @classmethod def get_config_opts(cls): return [] def pre_execute(self): pass def do_execute(self): pass def post_execute(self): pass class FakeDummy1Strategy1(FakeStrategy): GOAL_NAME = "dummy_1" NAME = "strategy_1" DISPLAY_NAME = "Strategy 1" @classmethod def get_config_opts(cls): return [ cfg.StrOpt('test_opt', help="Option used for testing."), ] class FakeDummy1Strategy2(FakeStrategy): GOAL_NAME = "dummy_1" NAME = "strategy_2" DISPLAY_NAME = "Strategy 2" class FakeDummy2Strategy3(FakeStrategy): GOAL_NAME = "dummy_2" NAME = "strategy_3" DISPLAY_NAME = "Strategy 3" class FakeDummy2Strategy4(FakeStrategy): GOAL_NAME = "dummy_2" NAME = "strategy_4" DISPLAY_NAME = "Strategy 4" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6471353 python_watcher-14.0.0/watcher/tests/decision_engine/loading/0000775000175000017500000000000000000000000024213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/loading/__init__.py0000664000175000017500000000000000000000000026312 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/loading/test_collector_loader.py0000664000175000017500000000614100000000000031142 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
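# NOTE: illustrative sketch, not part of the original tests. The collector
# loader test below substitutes stevedore's DriverManager with a pre-built
# test instance; the standalone pattern, using the hypothetical names
# _FakeCollector and "demo_module", looks like this:
def _sketch_driver_manager_test_instance():
    from stevedore import driver
    from stevedore import extension

    class _FakeCollector(object):
        """Hypothetical stand-in for a cluster data model collector."""

    fake_driver = driver.DriverManager.make_test_instance(
        extension=extension.Extension(
            name="fake",
            entry_point="demo_module:_FakeCollector",
            plugin=_FakeCollector,
            obj=None,
        ),
        namespace="watcher_cluster_data_model_collectors",
    )
    # With obj=None, DriverManager.driver resolves to the plugin class.
    return fake_driver.driver is _FakeCollector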
from stevedore import driver as drivermanager from stevedore import extension as stevedore_extension from unittest import mock from watcher.common import clients from watcher.common import exception from watcher.decision_engine.loading import default as default_loading from watcher.tests import base from watcher.tests import conf_fixture from watcher.tests.decision_engine.model import faker_cluster_state class TestClusterDataModelCollectorLoader(base.TestCase): def setUp(self): super(TestClusterDataModelCollectorLoader, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) self.collector_loader = ( default_loading.ClusterDataModelCollectorLoader()) def test_load_collector_with_empty_model(self): self.assertRaises( exception.LoadingError, self.collector_loader.load, None) def test_collector_loader(self): fake_driver = "fake" # Set up the fake Stevedore extensions fake_driver_call = drivermanager.DriverManager.make_test_instance( extension=stevedore_extension.Extension( name=fake_driver, entry_point="%s:%s" % ( faker_cluster_state.FakerModelCollector.__module__, faker_cluster_state.FakerModelCollector.__name__), plugin=faker_cluster_state.FakerModelCollector, obj=None, ), namespace="watcher_cluster_data_model_collectors", ) with mock.patch.object(drivermanager, "DriverManager") as m_driver_manager: m_driver_manager.return_value = fake_driver_call loaded_collector = self.collector_loader.load("fake") self.assertIsInstance( loaded_collector, faker_cluster_state.FakerModelCollector) class TestLoadClusterDataModelCollectors(base.TestCase): collector_loader = default_loading.ClusterDataModelCollectorLoader() scenarios = [ (collector_name, {"collector_name": collector_name, "collector_cls": collector_cls}) for collector_name, collector_cls in collector_loader.list_available().items()] def setUp(self): super(TestLoadClusterDataModelCollectors, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) @mock.patch.object(clients, 'OpenStackClients', mock.Mock()) def test_load_cluster_data_model_collectors(self): collector = self.collector_loader.load(self.collector_name) self.assertIsNotNone(collector) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/loading/test_default_planner_loader.py0000664000175000017500000000220400000000000032313 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
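# NOTE: context sketch, not from the original tree. DefaultPlannerLoader,
# exercised below, discovers planners through stevedore entry points; the
# corresponding setup.cfg section is declared roughly along these lines
# (entries shown are indicative, not verbatim):
#
#   [entry_points]
#   watcher_planners =
#       weight = watcher.decision_engine.planner.weight:WeightPlanner
#
# list_available() maps each entry-point name to its plugin class, and
# load(name) instantiates the selected planner.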
from watcher.decision_engine.loading import default from watcher.decision_engine.planner import base as planner from watcher.tests import base class TestDefaultPlannerLoader(base.TestCase): def setUp(self): super(TestDefaultPlannerLoader, self).setUp() self.loader = default.DefaultPlannerLoader() def test_endpoints(self): for endpoint in self.loader.list_available(): loaded = self.loader.load(endpoint) self.assertIsNotNone(loaded) self.assertIsInstance(loaded, planner.BasePlanner) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/loading/test_default_strategy_loader.py0000664000175000017500000000560600000000000032527 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore import extension from unittest import mock from watcher.common import exception from watcher.decision_engine.loading import default as default_loading from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher.tests import base class TestDefaultStrategyLoader(base.TestCase): def setUp(self): super(TestDefaultStrategyLoader, self).setUp() self.strategy_loader = default_loading.DefaultStrategyLoader() def test_load_strategy_with_empty_model(self): self.assertRaises( exception.LoadingError, self.strategy_loader.load, None) def test_strategy_loader(self): dummy_strategy_name = "dummy" # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=dummy_strategy_name, entry_point="%s:%s" % ( dummy_strategy.DummyStrategy.__module__, dummy_strategy.DummyStrategy.__name__), plugin=dummy_strategy.DummyStrategy, obj=None, )], namespace="watcher_strategies", ) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.return_value = fake_extmanager_call loaded_strategy = self.strategy_loader.load( "dummy") self.assertEqual("dummy", loaded_strategy.name) self.assertEqual("Dummy strategy", loaded_strategy.display_name) def test_load_dummy_strategy(self): strategy_loader = default_loading.DefaultStrategyLoader() loaded_strategy = strategy_loader.load("dummy") self.assertIsInstance(loaded_strategy, dummy_strategy.DummyStrategy) class TestLoadStrategiesWithDefaultStrategyLoader(base.TestCase): strategy_loader = default_loading.DefaultStrategyLoader() scenarios = [ (strategy_name, {"strategy_name": strategy_name, "strategy_cls": strategy_cls}) for strategy_name, strategy_cls in strategy_loader.list_available().items()] def test_load_strategies(self): strategy = self.strategy_loader.load(self.strategy_name) self.assertIsNotNone(strategy) self.assertEqual(self.strategy_name, strategy.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/loading/test_goal_loader.py0000664000175000017500000000526000000000000030077 
0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore import extension from unittest import mock from watcher.common import exception from watcher.decision_engine.goal import goals from watcher.decision_engine.loading import default as default_loading from watcher.tests import base class TestDefaultGoalLoader(base.TestCase): def setUp(self): super(TestDefaultGoalLoader, self).setUp() self.goal_loader = default_loading.DefaultGoalLoader() def test_load_goal_with_empty_model(self): self.assertRaises( exception.LoadingError, self.goal_loader.load, None) def test_goal_loader(self): dummy_goal_name = "dummy" # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=dummy_goal_name, entry_point="%s:%s" % ( goals.Dummy.__module__, goals.Dummy.__name__), plugin=goals.Dummy, obj=None, )], namespace="watcher_goals", ) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.return_value = fake_extmanager_call loaded_goal = self.goal_loader.load("dummy") self.assertEqual("dummy", loaded_goal.name) self.assertEqual("Dummy goal", loaded_goal.display_name) def test_load_dummy_goal(self): goal_loader = default_loading.DefaultGoalLoader() loaded_goal = goal_loader.load("dummy") self.assertIsInstance(loaded_goal, goals.Dummy) class TestLoadGoalsWithDefaultGoalLoader(base.TestCase): goal_loader = default_loading.DefaultGoalLoader() # test matrix (1 test execution per goal entry point) scenarios = [ (goal_name, {"goal_name": goal_name, "goal_cls": goal_cls}) for goal_name, goal_cls in goal_loader.list_available().items()] def test_load_goals(self): goal = self.goal_loader.load(self.goal_name) self.assertIsNotNone(goal) self.assertEqual(self.goal_name, goal.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6511352 python_watcher-14.0.0/watcher/tests/decision_engine/messaging/0000775000175000017500000000000000000000000024553 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/messaging/__init__.py0000664000175000017500000000000000000000000026652 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/messaging/test_audit_endpoint.py0000664000175000017500000000565600000000000031206 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.decision_engine.audit import continuous as continuous_handler from watcher.decision_engine.audit import oneshot as oneshot_handler from watcher.decision_engine.messaging import audit_endpoint from watcher.decision_engine.model.collector import manager from watcher.tests.db import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class TestAuditEndpoint(base.DbTestCase): def setUp(self): super(TestAuditEndpoint, self).setUp() self.goal = obj_utils.create_test_goal(self.context) self.audit_template = obj_utils.create_test_audit_template( self.context) self.audit = obj_utils.create_test_audit( self.context, audit_template_id=self.audit_template.id) @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") def test_do_trigger_audit(self, mock_collector, mock_handler): mock_collector.return_value = faker_cluster_state.FakerModelCollector() audit_handler = oneshot_handler.OneShotAuditHandler endpoint = audit_endpoint.AuditEndpoint(audit_handler) with mock.patch.object(oneshot_handler.OneShotAuditHandler, 'execute') as mock_call: mock_call.return_value = 0 endpoint.do_trigger_audit(self.context, self.audit.uuid) self.assertEqual(mock_call.call_count, 1) @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") def test_trigger_audit(self, mock_collector, mock_handler): mock_collector.return_value = faker_cluster_state.FakerModelCollector() audit_handler = oneshot_handler.OneShotAuditHandler endpoint = audit_endpoint.AuditEndpoint(audit_handler) with mock.patch.object(endpoint.executor, 'submit') as mock_call: mock_execute = mock.call(endpoint.do_trigger_audit, self.context, self.audit.uuid) endpoint.trigger_audit(self.context, self.audit.uuid) mock_call.assert_has_calls([mock_execute]) self.assertEqual(mock_call.call_count, 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/messaging/test_data_model_endpoint.py0000664000175000017500000000373000000000000032160 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2019 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
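# NOTE: illustrative sketch, not part of the original tests. The audit
# endpoint test above verifies that trigger_audit() merely delegates work
# to a background executor; the bare pattern, with hypothetical names, is:
def _sketch_executor_delegation():
    from unittest import mock

    class _Endpoint(object):
        def __init__(self):
            self.executor = mock.Mock()

        def trigger(self, payload):
            # Hand the payload off to the (mocked) executor.
            self.executor.submit(self._work, payload)

        def _work(self, payload):
            return payload

    endpoint = _Endpoint()
    endpoint.trigger("some-audit-uuid")
    endpoint.executor.submit.assert_called_once_with(
        endpoint._work, "some-audit-uuid")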
import unittest from unittest import mock from watcher.common import exception from watcher.common import utils from watcher.decision_engine.messaging import data_model_endpoint from watcher.decision_engine.model.collector import manager from watcher.objects import audit class TestDataModelEndpoint(unittest.TestCase): def setUp(self): self.endpoint_instance = data_model_endpoint.DataModelEndpoint('fake') @mock.patch.object(audit.Audit, 'get') def test_get_audit_scope(self, mock_get): mock_get.return_value = mock.Mock(scope='fake_scope') audit_uuid = utils.generate_uuid() result = self.endpoint_instance.get_audit_scope( context=None, audit=audit_uuid) self.assertEqual('fake_scope', result) @mock.patch.object(audit.Audit, 'get_by_name') def test_get_audit_scope_with_error_name(self, mock_get_by_name): mock_get_by_name.side_effect = exception.AuditNotFound() audit_name = 'error_audit_name' self.assertRaises( exception.InvalidIdentity, self.endpoint_instance.get_audit_scope, context=None, audit=audit_name) @mock.patch.object(manager, 'CollectorManager', mock.Mock()) def test_get_data_model_info(self): result = self.endpoint_instance.get_data_model_info(context='fake') self.assertIn('context', result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6511352 python_watcher-14.0.0/watcher/tests/decision_engine/model/0000775000175000017500000000000000000000000023676 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/__init__.py0000664000175000017500000000000000000000000025775 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6511352 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/0000775000175000017500000000000000000000000024607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/ironic_scenario_1.xml0000664000175000017500000000070300000000000030717 0ustar00zuulzuul00000000000000 1 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1.xml0000664000175000017500000002427200000000000027363 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_10.xml0000664000175000017500000001157000000000000027440 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_1_node_unavailable.xml 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_1_node_unavailable.xm0000664000175000017500000002330000000000000034021 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_all_instances_exclude.xml 22 mtime=1743591576.0 
python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_all_instances_exclude0000664000175000017500000001705100000000000034224 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_all_nodes_disable.xml0000664000175000017500000000215200000000000034112 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml0000664000175000017500000000204100000000000032132 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml0000664000175000017500000000475300000000000032147 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml0000664000175000017500000000225300000000000032024 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml0000664000175000017500000000277700000000000032154 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xm0000664000175000017500000000042600000000000034045 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml0000664000175000017500000000064700000000000033537 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml0000664000175000017500000000306300000000000032027 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml0000664000175000017500000000325500000000000032033 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml0000664000175000017500000000502500000000000032033 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disab0000664000175000017500000000507100000000000034025 0ustar00zuulzuul00000000000000 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/data/storage_scenario_1.xml0000664000175000017500000001006300000000000031100 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py0000664000175000017500000001557600000000000031467 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Authors: Vojtech CIMA # Bruno GRAZIOLI # Sean MURPHY # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from unittest import mock from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import model_root as modelroot class FakerModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None, audit_scope=None): if config is None: config = mock.Mock() super(FakerModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def get_audit_scope_handler(self, audit_scope): return None def execute(self): return self.generate_scenario_1() def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.ModelRoot.from_xml(self.load_data(filename)) def generate_scenario_1(self): """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping""" return self.load_model('scenario_1_with_metrics.xml') def generate_scenario_2(self): """Simulates a cluster With 4 nodes and 6 instances all mapped to a single node """ return self.load_model('scenario_2_with_metrics.xml') def generate_scenario_3(self): """Simulates a cluster With 4 nodes and 6 instances all mapped to one node """ return self.load_model('scenario_3_with_metrics.xml') def generate_scenario_4(self): """Simulates a cluster With 4 nodes and 6 instances spread on all nodes """ return self.load_model('scenario_4_with_metrics.xml') class FakeGnocchiMetrics(object): def __init__(self, model): self.model = model def mock_get_statistics(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): if meter_name == 'host_cpu_usage': return self.get_compute_node_cpu_util( resource, period, aggregate, granularity) elif meter_name == 'host_ram_usage': return self.get_compute_node_ram_util( resource, period, aggregate, granularity) elif meter_name == 'instance_cpu_usage': return self.get_instance_cpu_util( resource, period, aggregate, granularity) elif meter_name == 'instance_ram_usage': return self.get_instance_ram_util( resource, period, aggregate, granularity) elif meter_name == 'instance_root_disk_size': return self.get_instance_disk_root_size( resource, period, aggregate, granularity) def get_compute_node_cpu_util(self, resource, period, 
aggregate, granularity): """Calculates node utilization dynamically. node CPU utilization should consider and correlate with actual instance-node mappings provided within a cluster model. Returns relative node CPU utilization <0, 100>. :param r_id: resource id """ node = self.model.get_node_by_uuid(resource.uuid) instances = self.model.get_node_instances(node) util_sum = 0.0 for instance in instances: total_cpu_util = instance.vcpus * self.get_instance_cpu_util( instance, period, aggregate, granularity) util_sum += total_cpu_util / 100.0 util_sum /= node.vcpus return util_sum * 100.0 def get_compute_node_ram_util(self, resource, period, aggregate, granularity): # Returns mock host ram usage in KB based on the allocated # instances. node = self.model.get_node_by_uuid(resource.uuid) instances = self.model.get_node_instances(node) util_sum = 0.0 for instance in instances: util_sum += self.get_instance_ram_util( instance, period, aggregate, granularity) return util_sum / 1024 @staticmethod def get_instance_cpu_util(resource, period, aggregate, granularity): instance_cpu_util = dict() instance_cpu_util['INSTANCE_0'] = 10 instance_cpu_util['INSTANCE_1'] = 30 instance_cpu_util['INSTANCE_2'] = 60 instance_cpu_util['INSTANCE_3'] = 20 instance_cpu_util['INSTANCE_4'] = 40 instance_cpu_util['INSTANCE_5'] = 50 instance_cpu_util['INSTANCE_6'] = 100 instance_cpu_util['INSTANCE_7'] = 100 instance_cpu_util['INSTANCE_8'] = 100 instance_cpu_util['INSTANCE_9'] = 100 return instance_cpu_util[str(resource.uuid)] @staticmethod def get_instance_ram_util(resource, period, aggregate, granularity): instance_ram_util = dict() instance_ram_util['INSTANCE_0'] = 1 instance_ram_util['INSTANCE_1'] = 2 instance_ram_util['INSTANCE_2'] = 4 instance_ram_util['INSTANCE_3'] = 8 instance_ram_util['INSTANCE_4'] = 3 instance_ram_util['INSTANCE_5'] = 2 instance_ram_util['INSTANCE_6'] = 1 instance_ram_util['INSTANCE_7'] = 2 instance_ram_util['INSTANCE_8'] = 4 instance_ram_util['INSTANCE_9'] = 8 return instance_ram_util[str(resource.uuid)] @staticmethod def get_instance_disk_root_size(resource, period, aggregate, granularity): instance_disk_util = dict() instance_disk_util['INSTANCE_0'] = 10 instance_disk_util['INSTANCE_1'] = 15 instance_disk_util['INSTANCE_2'] = 30 instance_disk_util['INSTANCE_3'] = 35 instance_disk_util['INSTANCE_4'] = 20 instance_disk_util['INSTANCE_5'] = 25 instance_disk_util['INSTANCE_6'] = 25 instance_disk_util['INSTANCE_7'] = 25 instance_disk_util['INSTANCE_8'] = 25 instance_disk_util['INSTANCE_9'] = 25 return instance_disk_util[str(resource.uuid)] # TODO(lpetrut): consider dropping Ceilometer support, it was deprecated # in Ocata. class FakeCeilometerMetrics(FakeGnocchiMetrics): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/faker_cluster_state.py0000664000175000017500000003016500000000000030306 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os from unittest import mock from watcher.common import utils from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root as modelroot volume_uuid_mapping = { "volume_0": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "volume_1": "74454247-a064-4b34-8f43-89337987720e", "volume_2": "a16c811e-2521-4fd3-8779-6a94ccb3be73", "volume_3": "37856b95-5be4-4864-8a49-c83f55c66780", } class FakerModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None, audit_scope=None): if config is None: config = mock.Mock(period=777) super(FakerModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.ModelRoot.from_xml(self.load_data(filename)) def get_audit_scope_handler(self, audit_scope): return None def execute(self): return self._cluster_data_model or self.build_scenario_1() def build_scenario_1(self): instances = [] model = modelroot.ModelRoot() # number of nodes node_count = 5 # number max of instance per node node_instance_count = 7 # total number of virtual machine instance_count = (node_count * node_instance_count) for id_ in range(0, node_count): node_uuid = "Node_{0}".format(id_) hostname = "hostname_{0}".format(id_) node_attributes = { "id": id_, "uuid": node_uuid, "hostname": hostname, "memory": 132, "memory_mb_reserved": 0, "memory_ratio": 1, "disk": 250, "disk_capacity": 250, "disk_gb_reserved": 0, "disk_ratio": 1, "vcpus": 40, "vcpu_reserved": 0, "vcpu_ratio": 1, } node = element.ComputeNode(**node_attributes) model.add_node(node) for i in range(0, instance_count): instance_uuid = "INSTANCE_{0}".format(i) if instance_uuid == "INSTANCE_1": project_id = "26F03131-32CB-4697-9D61-9123F87A8147" elif instance_uuid == "INSTANCE_2": project_id = "109F7909-0607-4712-B32C-5CC6D49D2F15" else: project_id = "91FFFE30-78A0-4152-ACD2-8310FF274DC9" instance_attributes = { "uuid": instance_uuid, "name": instance_uuid, "memory": 2, "disk": 20, "disk_capacity": 20, "vcpus": 10, "metadata": '{"optimize": true,"top": "floor","nested": {"x": "y"}}', "project_id": project_id } instance = element.Instance(**instance_attributes) instances.append(instance) model.add_instance(instance) mappings = [ ("INSTANCE_0", "Node_0"), ("INSTANCE_1", "Node_0"), ("INSTANCE_2", "Node_1"), ("INSTANCE_3", "Node_2"), ("INSTANCE_4", "Node_2"), ("INSTANCE_5", "Node_2"), ("INSTANCE_6", "Node_3"), ("INSTANCE_7", "Node_4"), ] for instance_uuid, node_uuid in mappings: model.map_instance( model.get_instance_by_uuid(instance_uuid), model.get_node_by_uuid(node_uuid), ) return model def generate_scenario_1(self): return self.load_model('scenario_1.xml') def generate_scenario_1_with_1_node_unavailable(self): return self.load_model('scenario_1_with_1_node_unavailable.xml') def generate_scenario_1_with_all_nodes_disable(self): return self.load_model('scenario_1_with_all_nodes_disable.xml') def generate_scenario_1_with_all_instances_exclude(self): return self.load_model('scenario_1_with_all_instances_exclude.xml') def generate_scenario_3_with_2_nodes(self): return self.load_model('scenario_3_with_2_nodes.xml') 
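    # NOTE: illustrative aside -- every generate_scenario_* helper in this
    # class funnels through load_data()/load_model(), i.e. a scenario is
    # just an XML serialization of a ModelRoot kept in the data/ folder.
    # Loading one outside the collector (hypothetical standalone use)
    # reduces to:
    #
    #     with open(os.path.join(data_folder, 'scenario_1.xml'), 'rb') as f:
    #         model = modelroot.ModelRoot.from_xml(f.read())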
def generate_scenario_4_with_1_node_no_instance(self): return self.load_model('scenario_4_with_1_node_no_instance.xml') def generate_scenario_5_with_instance_disk_0(self): return self.load_model('scenario_5_with_instance_disk_0.xml') def generate_scenario_6_with_2_nodes(self): return self.load_model('scenario_6_with_2_nodes.xml') def generate_scenario_7_with_2_nodes(self): return self.load_model('scenario_7_with_2_nodes.xml') def generate_scenario_8_with_4_nodes(self): return self.load_model('scenario_8_with_4_nodes.xml') def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self): return self.load_model( 'scenario_9_with_3_active_plus_1_disabled_nodes.xml') def generate_scenario_10(self): return self.load_model('scenario_10.xml') class FakerStorageModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None, audit_scope=None): if config is None: config = mock.Mock(period=777) super(FakerStorageModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.StorageModelRoot.from_xml(self.load_data(filename)) def get_audit_scope_handler(self, audit_scope): return None def execute(self): return self._cluster_data_model or self.build_scenario_1() def build_scenario_1(self): model = modelroot.StorageModelRoot() # number of nodes node_count = 2 # number of pools per node pool_count = 2 # number of volumes volume_count = 9 for i in range(0, node_count): host = "host_{0}@backend_{0}".format(i) zone = "zone_{0}".format(i) volume_type = ["type_{0}".format(i)] node_attributes = { "host": host, "zone": zone, "status": 'enabled', "state": 'up', "volume_type": volume_type, } node = element.StorageNode(**node_attributes) model.add_node(node) for j in range(0, pool_count): name = "host_{0}@backend_{0}#pool_{1}".format(i, j) pool_attributes = { "name": name, "total_volumes": 2, "total_capacity_gb": 500, "free_capacity_gb": 420, "provisioned_capacity_gb": 80, "allocated_capacity_gb": 80, "virtual_free": 420, } pool = element.Pool(**pool_attributes) model.add_pool(pool) mappings = [ ("host_0@backend_0#pool_0", "host_0@backend_0"), ("host_0@backend_0#pool_1", "host_0@backend_0"), ("host_1@backend_1#pool_0", "host_1@backend_1"), ("host_1@backend_1#pool_1", "host_1@backend_1"), ] for pool_name, node_name in mappings: model.map_pool( model.get_pool_by_pool_name(pool_name), model.get_node_by_name(node_name), ) volume_uuid_mapping = [ "5028b1eb-8749-48ae-a42c-5bdd1323976f", "74454247-a064-4b34-8f43-89337987720e", "a16c811e-2521-4fd3-8779-6a94ccb3be73", "37856b95-5be4-4864-8a49-c83f55c66780", "694f8fb1-df96-46be-b67d-49f2c14a495e", "66b094b0-8fc3-4a94-913f-a5f9312b11a5", "e9013810-4b4c-4b94-a056-4c36702d51a3", "07976191-6a57-4c35-9f3c-55b3b5ecd6d5", "4d1c952d-95d0-4aac-82aa-c3cb509af9f3", ] for k in range(volume_count): uuid = volume_uuid_mapping[k] name = "name_{0}".format(k) volume_attributes = { "size": 40, "status": "in-use", "uuid": uuid, "attachments": '[{"server_id": "server","attachment_id": "attachment"}]', "name": name, "multiattach": 'True', "snapshot_id": uuid, "project_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": '{"readonly": false,"attached_mode": "rw"}', "bootable": 'False' } volume = element.Volume(**volume_attributes) model.add_volume(volume) 
mappings = [ (volume_uuid_mapping[0], "host_0@backend_0#pool_0"), (volume_uuid_mapping[1], "host_0@backend_0#pool_0"), (volume_uuid_mapping[2], "host_0@backend_0#pool_1"), (volume_uuid_mapping[3], "host_0@backend_0#pool_1"), (volume_uuid_mapping[4], "host_1@backend_1#pool_0"), (volume_uuid_mapping[5], "host_1@backend_1#pool_0"), (volume_uuid_mapping[6], "host_1@backend_1#pool_1"), (volume_uuid_mapping[7], "host_1@backend_1#pool_1"), ] for volume_uuid, pool_name in mappings: model.map_volume( model.get_volume_by_uuid(volume_uuid), model.get_pool_by_pool_name(pool_name), ) return model def generate_scenario_1(self): return self.load_model('storage_scenario_1.xml') class FakerBaremetalModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None): if config is None: config = mock.Mock(period=777) super(FakerBaremetalModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def get_audit_scope_handler(self, audit_scope): return None def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.BaremetalModelRoot.from_xml(self.load_data(filename)) def execute(self): return self._cluster_data_model or self.build_scenario_1() def build_scenario_1(self): model = modelroot.BaremetalModelRoot() # number of nodes node_count = 2 for i in range(0, node_count): uuid = utils.generate_uuid() node_attributes = { "uuid": uuid, "power_state": "power on", "maintenance": "false", "maintenance_reason": "null", "extra": {"compute_node_id": i} } node = element.IronicNode(**node_attributes) model.add_node(node) return model def generate_scenario_1(self): return self.load_model('ironic_scenario_1.xml') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/gnocchi_metrics.py0000664000175000017500000002221600000000000027413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
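# NOTE: illustrative sketch, not part of the original fixtures. The fake
# metric classes below return canned values keyed by resource UUID; a
# strategy test typically swaps the real datasource call for the fake's
# bound method. A self-contained version of that wiring, with hypothetical
# names, is:
def _sketch_fake_metrics_wiring():
    from unittest import mock

    canned = {'INSTANCE_0': 7.0, 'INSTANCE_1': 10.0}

    def fake_statistic_aggregation(resource=None, resource_type=None,
                                   meter_name=None, period=300,
                                   aggregate='mean', granularity=300):
        # Same lookup-by-uuid shape as the fakes below.
        return canned[str(resource.uuid)]

    datasource = mock.Mock()
    datasource.statistic_aggregation.side_effect = fake_statistic_aggregation
    instance = mock.Mock(uuid='INSTANCE_1')
    # Returns 10.0, the canned value for INSTANCE_1.
    return datasource.statistic_aggregation(
        resource=instance, meter_name='instance_cpu_usage')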
import oslo_utils.units


class FakeGnocchiMetrics(object):
    NAME = 'gnocchi'

    def __init__(self):
        self.emptytype = ""

    def empty_one_metric(self, emptytype):
        self.emptytype = emptytype

    def mock_get_statistics(self, resource=None, resource_type=None,
                            meter_name=None, period=None, aggregate='mean',
                            granularity=None):
        result = 0
        if meter_name == 'host_cpu_usage':
            result = self.get_usage_compute_node_cpu(resource)
        elif meter_name == 'host_ram_usage':
            result = self.get_usage_compute_node_ram(resource)
        elif meter_name == 'host_outlet_temp':
            result = self.get_average_outlet_temperature(resource)
        elif meter_name == 'host_inlet_temp':
            result = self.get_average_inlet_temp(resource)
        elif meter_name == 'host_airflow':
            result = self.get_average_airflow(resource)
        elif meter_name == 'host_power':
            result = self.get_average_power(resource)
        elif meter_name == 'instance_cpu_usage':
            result = self.get_average_usage_instance_cpu(resource)
        elif meter_name == 'instance_ram_usage':
            result = self.get_average_usage_instance_memory(resource)
        return result

    def mock_get_statistics_nn(self, resource=None, meter_name=None,
                               period=None, aggregate='mean',
                               granularity=300):
        """Statistics for noisy neighbor strategy

        Signature should match DataSourceBase.get_instance_l3_cache_usage
        """
        result = 0.0
        if period == 100:
            result = self.get_average_l3_cache_current(resource)
        if period == 200:
            result = self.get_average_l3_cache_previous(resource)
        return result

    def mock_get_statistics_wb(self, resource=None, resource_type=None,
                               meter_name=None, period=None,
                               aggregate='mean', granularity=300):
        """Statistics for workload balance strategy"""
        result = 0.0
        if meter_name == 'instance_cpu_usage':
            result = self.get_average_usage_instance_cpu_wb(resource)
        elif meter_name == 'instance_ram_usage':
            result = self.get_average_usage_instance_memory_wb(resource)
        return result

    @staticmethod
    def get_average_l3_cache_current(resource):
        """The average l3 cache used by instance"""
        uuid = resource.uuid
        mock = {}
        mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 35 * (
            oslo_utils.units.Ki)
        mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30 * (
            oslo_utils.units.Ki)
        mock['INSTANCE_3'] = 40 * oslo_utils.units.Ki
        mock['INSTANCE_4'] = 35 * oslo_utils.units.Ki
        return mock[str(uuid)]

    @staticmethod
    def get_average_l3_cache_previous(resource):
        """The average l3 cache used by instance"""
        uuid = resource.uuid
        mock = {}
        mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 34.5 * (
            oslo_utils.units.Ki)
        mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30.5 * (
            oslo_utils.units.Ki)
        mock['INSTANCE_3'] = 60 * oslo_utils.units.Ki
        mock['INSTANCE_4'] = 22.5 * oslo_utils.units.Ki
        return mock[str(uuid)]

    @staticmethod
    def get_average_outlet_temperature(resource):
        """The average outlet temperature for host"""
        uuid = resource.uuid
        mock = {}
        mock["Node_0"] = 30
        mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30
        # use a big value to make sure it exceeds threshold
        mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100
        return mock[str(uuid)]

    @staticmethod
    def get_usage_compute_node_ram(resource):
        uuid = resource.uuid
        mock = {}
        # Gnocchi returns hardware.memory.used samples in KB.
        mock['Node_0'] = 7 * oslo_utils.units.Ki
        mock['Node_1'] = 5 * oslo_utils.units.Ki
        mock['Node_2'] = 29 * oslo_utils.units.Ki
        mock['Node_3'] = 8 * oslo_utils.units.Ki
        mock['Node_4'] = 4 * oslo_utils.units.Ki
        return float(mock[str(uuid)])

    @staticmethod
    def get_average_airflow(resource):
        """The average airflow for host"""
        uuid = resource.uuid
        mock = {}
        mock['Node_0'] = 400
        # use a big value to make sure it exceeds threshold
        mock['Node_1'] = 100
        return mock[str(uuid)]

    @staticmethod
    def get_average_inlet_temp(resource):
        """The average inlet temperature for host"""
        uuid = resource.uuid
        mock = {}
        mock['Node_0'] = 24
        mock['Node_1'] = 26
        return mock[str(uuid)]

    @staticmethod
    def get_average_power(resource):
        """The average power for host"""
        uuid = resource.uuid
        mock = {}
        mock['Node_0'] = 260
        mock['Node_1'] = 240
        return mock[str(uuid)]

    @staticmethod
    def get_usage_compute_node_cpu(*args, **kwargs):
        """The last compute node CPU usage values to average

        :param resource: compute node (first positional argument)
        :return: float value
        """
        resource = args[0]
        uuid = "%s_%s" % (resource.uuid, resource.hostname)
        # Normalize
        measurements = {}
        # node 0
        measurements['Node_0_hostname_0'] = 7
        measurements['Node_1_hostname_1'] = 7
        # node 1
        measurements['Node_2_hostname_2'] = 80
        # node 2
        measurements['Node_3_hostname_3'] = 5
        measurements['Node_4_hostname_4'] = 5
        measurements['Node_5_hostname_5'] = 10
        # node 3
        measurements['Node_6_hostname_6'] = 8
        # This node doesn't send metrics
        measurements['LOST_NODE_hostname_7'] = None
        measurements['Node_19_hostname_19'] = 10
        # node 4
        measurements['INSTANCE_7_hostname_7'] = 4
        # metrics might be missing in scenarios which do not do computations
        if uuid not in measurements.keys():
            measurements[uuid] = 0
        result = measurements[uuid]
        return float(result) if result is not None else None

    @staticmethod
    def get_average_usage_instance_cpu(*args, **kwargs):
        """The last VM CPU usage values to average

        :param resource: instance (first positional argument)
        :return: int value
        """
        resource = args[0]
        uuid = resource.uuid
        mock = {}
        # node 0
        mock['INSTANCE_0'] = 7
        mock['INSTANCE_1'] = 7
        # node 1
        mock['INSTANCE_2'] = 10
        # node 2
        mock['INSTANCE_3'] = 5
        mock['INSTANCE_4'] = 5
        mock['INSTANCE_5'] = 10
        # node 3
        mock['INSTANCE_6'] = 8
        # node 4
        mock['INSTANCE_7'] = 4
        mock['LOST_INSTANCE'] = None
        # metrics might be missing in scenarios which do not do computations
        if uuid not in mock.keys():
            mock[uuid] = 0
        return mock[str(uuid)]

    @staticmethod
    def get_average_usage_instance_memory(resource):
        uuid = resource.uuid
        mock = {}
        # node 0
        mock['INSTANCE_0'] = 2
        mock['INSTANCE_1'] = 5
        # node 1
        mock['INSTANCE_2'] = 5
        # node 2
        mock['INSTANCE_3'] = 8
        mock['INSTANCE_4'] = 5
        mock['INSTANCE_5'] = 16
        # node 3
        mock['INSTANCE_6'] = 8
        # node 4
        mock['INSTANCE_7'] = 4
        return mock[str(uuid)]

    @staticmethod
    def get_average_usage_instance_disk(resource):
        uuid = resource.uuid
        mock = {}
        # node 0
        mock['INSTANCE_0'] = 2
        mock['INSTANCE_1'] = 2
        # node 1
        mock['INSTANCE_2'] = 2
        # node 2
        mock['INSTANCE_3'] = 10
        mock['INSTANCE_4'] = 15
        mock['INSTANCE_5'] = 20
        # node 3
        mock['INSTANCE_6'] = 8
        # node 4
        mock['INSTANCE_7'] = 4
        return mock[str(uuid)]

    @staticmethod
    def get_average_usage_instance_cpu_wb(resource):
        """The last VM CPU usage values to average

        :param resource: instance
        :return: float value
        """
        uuid = resource.uuid
        mock = {}
        # node 0
        mock['INSTANCE_1'] = 80
        mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50
        # node 1
        mock['INSTANCE_3'] = 20
        mock['INSTANCE_4'] = 10
        return float(mock[str(uuid)])

    @staticmethod
    def get_average_usage_instance_memory_wb(resource):
        uuid = resource.uuid
        mock = {}
        # node 0
        mock['INSTANCE_1'] = 30
        mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 12
        # node 1
        mock['INSTANCE_3'] = 12
        mock['INSTANCE_4'] = 12
        return mock[str(uuid)]
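# --- Editor's example (not part of the source archive) ---
# A minimal sketch of how FakeGnocchiMetrics can stand in for the real
# Gnocchi data source in a unit test. The mock.Mock() resource and the
# 'statistic_aggregation' attribute name are illustrative assumptions,
# not taken from this file.
from unittest import mock

fake = FakeGnocchiMetrics()
datasource = mock.Mock()
# Route every metric query through the fake's canned lookup tables.
datasource.statistic_aggregation = fake.mock_get_statistics
assert datasource.statistic_aggregation(
    resource=mock.Mock(uuid='INSTANCE_0'),
    meter_name='instance_cpu_usage') == 7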
python_watcher-14.0.0/watcher/tests/decision_engine/model/monasca_metrics.py

# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class FakeMonascaMetrics(object):

    def __init__(self):
        self.emptytype = ""

    def empty_one_metric(self, emptytype):
        self.emptytype = emptytype

    def mock_get_statistics(self, resource=None, resource_type=None,
                            meter_name=None, period=None,
                            aggregate='mean', granularity=None):
        result = 0.0
        if meter_name == 'host_cpu_usage':
            result = self.get_usage_compute_node_cpu(resource)
        elif meter_name == 'instance_cpu_usage':
            result = self.get_average_usage_instance_cpu(resource)
        return result

    def mock_get_statistics_wb(self, resource=None, resource_type=None,
                               meter_name=None, period=None,
                               aggregate='mean', granularity=None):
        """Statistics for workload balance strategy"""
        result = 0.0
        if meter_name == 'instance_cpu_usage':
            result = self.get_average_usage_instance_cpu_wb(resource)
        return result

    @staticmethod
    def get_usage_compute_node_cpu(*args, **kwargs):
        """The last compute node CPU usage values to average

        :param resource: compute node (first positional argument)
        :return: mean CPU usage
        """
        resource = args[0]
        uuid = resource.uuid
        measurements = {}
        # node 0
        measurements['Node_0'] = 7
        measurements['Node_1'] = 7
        # node 1
        measurements['Node_2'] = 80
        # node 2
        measurements['Node_3'] = 5
        measurements['Node_4'] = 5
        measurements['Node_5'] = 10
        # node 3
        measurements['Node_6'] = 8
        measurements['Node_19'] = 10
        # node 4
        measurements['INSTANCE_7'] = 4
        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 8
        statistics = [
            {'columns': ['avg'],
             'statistics': [[float(measurements[str(uuid)])]]}]
        cpu_usage = None
        for stat in statistics:
            avg_col_idx = stat['columns'].index('avg')
            values = [r[avg_col_idx] for r in stat['statistics']]
            value = float(sum(values)) / len(values)
            cpu_usage = value
        return cpu_usage

    @staticmethod
    def get_average_usage_instance_cpu(*args, **kwargs):
        """The last VM CPU usage values to average

        :param resource: instance (first positional argument)
        :return: mean CPU usage
        """
        resource = args[0]
        uuid = resource.uuid
        measurements = {}
        # node 0
        measurements['INSTANCE_0'] = 7
        measurements['INSTANCE_1'] = 7
        # node 1
        measurements['INSTANCE_2'] = 10
        # node 2
        measurements['INSTANCE_3'] = 5
        measurements['INSTANCE_4'] = 5
        measurements['INSTANCE_5'] = 10
        # node 3
        measurements['INSTANCE_6'] = 8
        # node 4
        measurements['INSTANCE_7'] = 4
        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 8
        statistics = [
            {'columns': ['avg'],
             'statistics': [[float(measurements[str(uuid)])]]}]
        cpu_usage = None
        for stat in statistics:
            avg_col_idx = stat['columns'].index('avg')
            values = [r[avg_col_idx] for r in stat['statistics']]
            value = float(sum(values)) / len(values)
            cpu_usage = value
        return cpu_usage
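# --- Editor's note (not part of the source archive) ---
# FakeMonascaMetrics mirrors the shape of a Monasca statistics response:
# a list of result sets, each carrying a 'columns' list and rows of
# 'statistics'. A self-contained illustration of how the mean is pulled
# out of that structure (the sample values here are made up):
stats = [{'columns': ['avg'], 'statistics': [[7.0], [9.0]]}]
for stat in stats:
    avg_idx = stat['columns'].index('avg')     # locate the 'avg' column
    values = [row[avg_idx] for row in stat['statistics']]
    assert sum(values) / len(values) == 8.0    # mean over all rows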
"nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "tags": [ "tag" ], "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "trusted_image_certificates": [ "cert-id-1", "cert-id-2" ], "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2" }, "nova_object.name": "InstanceCreatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.10" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json0000664000175000017500000000327600000000000034010 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.end", "payload":{ "nova_object.data":{ "architecture":"x86_64", "availability_zone":null, "created_at":"2012-10-29T13:42:11Z", "deleted_at":"2012-10-29T13:42:11Z", "display_name":"some-server", "fault":null, "host":"compute", "host_name":"some-server", "ip_addresses":[], "kernel_id":"", "launched_at":"2012-10-29T13:42:11Z", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "metadata":{}, "node":"fake-mini", "os_type":null, "progress":0, "ramdisk_id":"", "reservation_id":"r-npxv0e40", "state":"deleted", "task_state":null, "power_state":"pending", "tenant_id":"6f70656e737461636b20342065766572", "terminated_at":"2012-10-29T13:42:11Z", "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id":"fake", "uuid":"73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name":"InstanceActionPayload", "nova_object.namespace":"nova", "nova_object.version":"1.0" }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_force_complete-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_0000664000175000017500000000730500000000000034176 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_force_complete.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "admin", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": 
"a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": "migrating", "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_post-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_0000664000175000017500000000725500000000000034202 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_post.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "admin", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, 
"node": "host2", "os_type": null, "power_state": "pending", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-lock.json0000664000175000017500000000723500000000000032731 0ustar00zuulzuul00000000000000{ "event_type": "instance.lock", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": true, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-pause-end.json0000664000175000017500000000724500000000000033663 0ustar00zuulzuul00000000000000{ "event_type": "instance.pause.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", 
"action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "paused", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_off-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_off-end.j0000664000175000017500000000725300000000000034013 0ustar00zuulzuul00000000000000{ "event_type": "instance.power_off.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, 
"swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "shutdown", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "stopped", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_on-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_on-end.js0000664000175000017500000000725000000000000034035 0ustar00zuulzuul00000000000000{ "event_type": "instance.power_on.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": 
"r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-rebuild-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-rebuild-end.jso0000664000175000017500000000745500000000000034021 0ustar00zuulzuul00000000000000{ "event_type": "instance.rebuild.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": null, "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "a2459075-d96c-40d5-893e-577ff92e721c", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "trusted_image_certificates": [ "rebuild-cert-id-1", "rebuild-cert-id-2" ], "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionRebuildPayload", "nova_object.namespace": "nova", "nova_object.version": "1.8" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-rescue-end.json0000664000175000017500000000736600000000000034040 0ustar00zuulzuul00000000000000{ "event_type": "instance.rescue.end", "payload": { "nova_object.data": { "action_initiator_project": 
"6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "shutdown", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "rescue_image_ref": "a2459075-d96c-40d5-893e-577ff92e721c", "reservation_id": "r-npxv0e40", "state": "rescued", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionRescuePayload", "nova_object.namespace": "nova", "nova_object.version": "1.2" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-0000664000175000017500000000721100000000000034116 0ustar00zuulzuul00000000000000{ "event_type": "instance.resize_confirm.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "2", "is_public": true, "memory_mb": 
python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-end.json

{ "event_type": "instance.resize_confirm.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "2", "is_public": true, "memory_mb": 2048, "name": "m1.small", "projects": null, "root_gb": 20, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" }
"request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-resume-end.json0000664000175000017500000000724600000000000034047 0ustar00zuulzuul00000000000000{ "event_type": "instance.resume.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-shelve-end.json0000664000175000017500000000725000000000000034030 0ustar00zuulzuul00000000000000{ "event_type": "instance.shelve.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": 
"MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "shutdown", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "shelved", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-shutdown-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-shutdown-end.js0000664000175000017500000000607100000000000034060 0ustar00zuulzuul00000000000000{ "event_type": "instance.shutdown.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", 
"nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "stopped", "task_state": "deleting", "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-soft_delete-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-soft_delete-end0000664000175000017500000000730400000000000034067 0ustar00zuulzuul00000000000000{ "event_type": "instance.soft_delete.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": "2012-10-29T13:42:11Z", "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "soft-delete", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", 
"publisher_id": "nova-compute:fake-mini" } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-suspend-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-suspend-end.jso0000664000175000017500000000725200000000000034047 0ustar00zuulzuul00000000000000{ "event_type": "instance.suspend.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "suspended", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-unlock.json0000664000175000017500000000724000000000000033270 0ustar00zuulzuul00000000000000{ "event_type": "instance.unlock", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" 
python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-unlock.json

{ "event_type": "instance.unlock", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-api:fake-mini" }
"port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-unrescue-end.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-unrescue-end.js0000664000175000017500000000725000000000000034036 0ustar00zuulzuul00000000000000{ "event_type": "instance.unrescue.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } 
python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/instance-unshelve-end.json

{ "event_type": "instance.unshelve.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" }
"", "launched_at": null, "metadata": {}, "node": "fake-mini", "old_display_name": null, "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-sd3ygfjj", "state": "paused", "task_state": "scheduling", "power_state": "pending", "ip_addresses": [], "state_update": { "nova_object.data": { "new_task_state": null, "old_state": null, "old_task_state": null, "state": "active"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc"}, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_instance-updat0000664000175000017500000000471600000000000034115 0ustar00zuulzuul00000000000000{ "event_type": "instance.update", "payload": { "nova_object.data": { "architecture": "x86_64", "audit_period": { "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z"}, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "availability_zone": null, "bandwidth": [], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "NEW_INSTANCE0", "host": "Node_0", "host_name": "NEW_INSTANCE0", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "kernel_id": "", "launched_at": null, "metadata": {}, "node": "hostname_0", "old_display_name": null, "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-sd3ygfjj", "state": "paused", "task_state": "scheduling", "power_state": "pending", "ip_addresses": [], "state_update": { "nova_object.data": { "old_task_state": null, "new_task_state": null, "old_state": "paused", "state": "paused"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc"}, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "priority": "INFO", "publisher_id": "nova-compute:Node_0" } ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_insta0000664000175000017500000000502600000000000034223 0ustar00zuulzuul00000000000000{ 
"event_type": "instance.update", "payload": { "nova_object.data": { "architecture": "x86_64", "audit_period": { "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z"}, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "availability_zone": null, "bandwidth": [], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "host": "Node_2", "host_name": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "kernel_id": "", "launched_at": null, "metadata": {}, "node": "hostname_0", "old_display_name": null, "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-sd3ygfjj", "state": "paused", "task_state": "scheduling", "power_state": "pending", "ip_addresses": [], "state_update": { "nova_object.data": { "old_task_state": null, "new_task_state": null, "old_state": "paused", "state": "paused"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id": "fake", "uuid": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7"}, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "priority": "INFO", "publisher_id": "nova-compute:Node_2" } ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legac0000664000175000017500000000313200000000000034154 0ustar00zuulzuul00000000000000{ "publisher_id": "compute:Node_2", "event_type": "compute.instance.update", "payload": { "access_ip_v4": null, "access_ip_v6": null, "architecture": null, "audit_period_beginning": "2016-08-17T13:00:00.000000", "audit_period_ending": "2016-08-17T13:56:05.262440", "availability_zone": "nova", "bandwidth": {}, "cell_name": "", "created_at": "2016-08-17 13:53:23+00:00", "deleted_at": "", "disk_gb": 1, "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "ephemeral_gb": 0, "host": "Node_2", "hostname": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "image_meta": { "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", "container_format": "bare", "disk_format": "qcow2", "min_disk": "1", "min_ram": "0" }, "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", "instance_flavor_id": "1", "instance_id": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "instance_type": "m1.tiny", "instance_type_id": 2, "kernel_id": "", "launched_at": "2016-08-17T13:53:35.000000", "memory_mb": 512, "metadata": {}, "new_task_state": null, "node": "hostname_0", "old_state": "paused", "old_task_state": null, "os_type": null, "progress": "", "ramdisk_id": "", "reservation_id": "r-0822ymml", "root_gb": 1, "state": "paused", "state_description": "paused", "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1", "terminated_at": "", "user_id": 
"ce64facc93354bbfa90f4f9f9a3e1e75", "vcpus": 1 } } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update0000664000175000017500000000116400000000000034110 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "hostname_0", "disabled": true, "last_seen_up": "2012-10-29T13:42:05Z", "binary": "nova-compute", "topic": "compute", "disabled_reason": "watcher_disabled", "report_count": 1, "forced_down": true, "version": 15 } }, "event_type": "service.update", "publisher_id": "nova-compute:Node_0" } ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update0000664000175000017500000000115000000000000034103 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "hostname_0", "disabled": false, "last_seen_up": "2012-10-29T13:42:05Z", "binary": "nova-compute", "topic": "compute", "disabled_reason": null, "report_count": 1, "forced_down": false, "version": 15 } }, "event_type": "service.update", "publisher_id": "nova-compute:Node_0" } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volu0000664000175000017500000000105300000000000034074 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.create.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "available", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {"readonly": false, "attached_mode": "rw"}, "glance_metadata": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json0000664000175000017500000000050300000000000034066 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host_0@backend_0#pool_0", "event_type": "capacity.pool", "payload": { "name_to_id": "host_0@backend_0#pool_0", "total": 500, "free": 460, "allocated": 40, "provisioned": 40, "virtual_free": 460, "reported_at": "2017-05-15T13:42:11Z" } } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json 22 mtime=1743591576.0 
python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node0000664000175000017500000000050300000000000034123 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host_2@backend_2#pool_0", "event_type": "capacity.pool", "payload": { "name_to_id": "host_2@backend_2#pool_0", "total": 500, "free": 460, "allocated": 40, "provisioned": 40, "virtual_free": 460, "reported_at": "2017-05-15T13:42:11Z" } } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool0000664000175000017500000000050500000000000034151 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host_0@backend_0#pool_2", "event_type": "capacity.pool", "payload": { "name_to_id": "host_0@backend_0#pool_2", "total": 500, "free": 380, "allocated": 120, "provisioned": 120, "virtual_free": 380, "reported_at": "2017-05-15T13:42:11Z" } } ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-0000664000175000017500000000062700000000000034043 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.create.end", "payload": { "host": "", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "error", "volume_attachment": [], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {} } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach0000664000175000017500000000101400000000000034070 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.attach.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "40", "status": "in-use", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create0000664000175000017500000000102000000000000034064 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.create.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "available", "volume_attachment": [{"server_id": "server", "attachment_id": 
"attachment"}], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create0000664000175000017500000000102000000000000034064 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_2@backend_2#pool_0", "event_type": "volume.create.end", "payload": { "host": "host_2@backend_2#pool_0", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "available", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete0000664000175000017500000000101600000000000034070 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.delete.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "40", "status": "deleting", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach0000664000175000017500000000066100000000000034063 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.detach.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "40", "status": "available", "volume_attachment": [], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {} } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize0000664000175000017500000000101400000000000034125 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.resize.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "20", "status": "in-use", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": 
{"readonly": false, "attached_mode": "rw"} } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update0000664000175000017500000000101600000000000034110 0ustar00zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.update.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_01", "size": "40", "status": "enabled", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/service-create.json0000664000175000017500000000126400000000000033074 0ustar00zuulzuul00000000000000{ "event_type": "service.create", "payload": { "nova_object.data": { "availability_zone": null, "binary": "nova-compute", "disabled": false, "disabled_reason": null, "forced_down": false, "host": "host2", "last_seen_up": null, "report_count": 0, "topic": "compute", "uuid": "fafac544-906b-4a6a-a9c6-c1f7a8078c73", "version": 23 }, "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "INFO", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/service-delete.json0000664000175000017500000000127100000000000033071 0ustar00zuulzuul00000000000000{ "event_type": "service.delete", "payload": { "nova_object.data": { "availability_zone": null, "binary": "nova-compute", "disabled": false, "disabled_reason": null, "forced_down": false, "host": "hostname_0", "last_seen_up": null, "report_count": 0, "topic": "compute", "uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73", "version": 23 }, "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "INFO", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/data/service-update.json0000664000175000017500000000130500000000000033107 0ustar00zuulzuul00000000000000{ "event_type": "service.update", "payload": { "nova_object.data": { "availability_zone": null, "binary": "nova-compute", "disabled": false, "disabled_reason": null, "forced_down": false, "host": "host1", "last_seen_up": "2012-10-29T13:42:05Z", "report_count": 1, "topic": "compute", "uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73", "version": 23 }, "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "INFO", "publisher_id": "nova-compute:host1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/fake_managers.py0000664000175000017500000000440400000000000031523 
0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import service_manager from watcher.decision_engine.model.notification import cinder as cnotification from watcher.decision_engine.model.notification import nova as novanotification from watcher.tests.decision_engine.model import faker_cluster_state class FakeManager(service_manager.ServiceManager): API_VERSION = '1.0' fake_cdmc = faker_cluster_state.FakerModelCollector() @property def service_name(self): return 'watcher-fake' @property def api_version(self): return self.API_VERSION @property def publisher_id(self): return 'test_publisher_id' @property def conductor_topic(self): return 'test_conductor_topic' @property def notification_topics(self): return ['nova'] @property def conductor_endpoints(self): return [] # Disable audit endpoint @property def notification_endpoints(self): return [ novanotification.VersionedNotification(self.fake_cdmc), ] class FakeStorageManager(FakeManager): fake_cdmc = faker_cluster_state.FakerStorageModelCollector() @property def notification_endpoints(self): return [ cnotification.CapacityNotificationEndpoint(self.fake_cdmc), cnotification.VolumeCreateEnd(self.fake_cdmc), cnotification.VolumeUpdateEnd(self.fake_cdmc), cnotification.VolumeDeleteEnd(self.fake_cdmc), cnotification.VolumeAttachEnd(self.fake_cdmc), cnotification.VolumeDetachEnd(self.fake_cdmc), cnotification.VolumeResizeEnd(self.fake_cdmc), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py0000664000175000017500000006170000000000000034176 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
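# The tests below exercise the cinder notification endpoints: each test
# loads a JSON fixture from the data/ directory, feeds it either through
# the decision-engine service dispatcher or directly into an endpoint's
# info() method, and then asserts that the storage cluster data model
# was updated (or left untouched) accordingly.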
import os from unittest import mock from oslo_serialization import jsonutils from oslo_utils import timeutils from watcher.common import cinder_helper from watcher.common import context from watcher.common import exception from watcher.common import service as watcher_service from watcher.db.sqlalchemy import api as db_api from watcher.decision_engine.model.notification import cinder as cnotification from watcher.tests import base as base_test from watcher.tests.db import utils from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.model.notification import fake_managers class NotificationTestCase(base_test.TestCase): @staticmethod def load_message(filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as json_file: json_data = jsonutils.load(json_file) return json_data class TestReceiveCinderNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} def setUp(self): super(TestReceiveCinderNotifications, self).setUp() p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') m_from_dict = p_from_dict.start() m_from_dict.return_value = self.context self.addCleanup(p_from_dict.stop) p_get_service_list = mock.patch.object( db_api.Connection, 'get_service_list') p_update_service = mock.patch.object( db_api.Connection, 'update_service') m_get_service_list = p_get_service_list.start() m_update_service = p_update_service.start() fake_service = utils.get_test_service( created_at=timeutils.utcnow()) m_get_service_list.return_value = [fake_service] m_update_service.return_value = fake_service.copy() self.addCleanup(p_get_service_list.stop) self.addCleanup(p_update_service.stop) @mock.patch.object(cnotification.CapacityNotificationEndpoint, 'info') def test_cinder_receive_capacity(self, m_info): message = self.load_message('capacity.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'capacity.host1@backend1#pool1', 'capacity.pool', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeCreateEnd, 'info') def test_cinder_receive_volume_create_end(self, m_info): message = self.load_message('scenario_1_volume-create.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.create.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeUpdateEnd, 'info') def test_cinder_receive_volume_update_end(self, m_info): message = self.load_message('scenario_1_volume-update.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.update.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeAttachEnd, 'info') def test_cinder_receive_volume_attach_end(self, m_info): message = 
self.load_message('scenario_1_volume-attach.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.attach.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeDetachEnd, 'info') def test_cinder_receive_volume_detach_end(self, m_info): message = self.load_message('scenario_1_volume-detach.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.detach.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeResizeEnd, 'info') def test_cinder_receive_volume_resize_end(self, m_info): message = self.load_message('scenario_1_volume-resize.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.resize.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeDeleteEnd, 'info') def test_cinder_receive_volume_delete_end(self, m_info): message = self.load_message('scenario_1_volume-delete.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.delete.end', expected_message, self.FAKE_METADATA) class TestCinderNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} def setUp(self): super(TestCinderNotifications, self).setUp() # fake cluster self.fake_cdmc = faker_cluster_state.FakerStorageModelCollector() def test_cinder_capacity(self): """test consuming capacity""" storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) pool_0_name = 'host_0@backend_0#pool_0' pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) # before self.assertEqual(pool_0_name, pool_0.name) self.assertEqual(420, pool_0.free_capacity_gb) self.assertEqual(420, pool_0.virtual_free) self.assertEqual(80, pool_0.allocated_capacity_gb) self.assertEqual(80, pool_0.provisioned_capacity_gb) message = self.load_message('scenario_1_capacity.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # after self.assertEqual(pool_0_name, pool_0.name) self.assertEqual(460, pool_0.free_capacity_gb) self.assertEqual(460, pool_0.virtual_free) self.assertEqual(40, pool_0.allocated_capacity_gb) self.assertEqual(40, pool_0.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_capacity_pool_notfound(self, m_cinder_helper): """test consuming capacity, new pool in existing node""" # 
storage_pool_by_name mock return_mock = mock.Mock() return_mock.configure_mock( name='host_0@backend_0#pool_2', total_volumes='2', total_capacity_gb='500', free_capacity_gb='380', provisioned_capacity_gb='120', allocated_capacity_gb='120') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) message = self.load_message('scenario_1_capacity_pool_notfound.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # after consuming message, pool_0 still exists pool_0_name = 'host_0@backend_0#pool_0' pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0_name, pool_0.name) self.assertEqual(420, pool_0.free_capacity_gb) self.assertEqual(420, pool_0.virtual_free) self.assertEqual(80, pool_0.allocated_capacity_gb) self.assertEqual(80, pool_0.provisioned_capacity_gb) # new pool was added pool_1_name = 'host_0@backend_0#pool_2' m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) storage_node = storage_model.get_node_by_pool_name(pool_1_name) self.assertEqual('host_0@backend_0', storage_node.host) pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) self.assertEqual(pool_1_name, pool_1.name) self.assertEqual(500, pool_1.total_capacity_gb) self.assertEqual(380, pool_1.free_capacity_gb) self.assertEqual(120, pool_1.allocated_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_capacity_node_notfound(self, m_cinder_helper): """test consuming capacity, new pool in new node""" return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_2@backend_2#pool_0', total_volumes='2', total_capacity_gb='500', free_capacity_gb='460', provisioned_capacity_gb='40', allocated_capacity_gb='40') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) # storage_node_by_name mock return_node_mock = mock.Mock() return_node_mock.configure_mock( host='host_2@backend_2', zone='nova', state='up', status='enabled') m_get_storage_node_by_name = mock.Mock( side_effect=lambda name: return_node_mock) m_get_volume_type_by_backendname = mock.Mock( side_effect=lambda name: [mock.Mock('backend_2')]) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name, get_storage_node_by_name=m_get_storage_node_by_name, get_volume_type_by_backendname=m_get_volume_type_by_backendname) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) message = self.load_message('scenario_1_capacity_node_notfound.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # new pool and new node were added node_1_name = 'host_2@backend_2' pool_1_name = node_1_name + '#pool_0' volume_type = 'backend_2' m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) m_get_storage_node_by_name.assert_called_once_with(node_1_name) m_get_volume_type_by_backendname.assert_called_once_with(volume_type) # new node was added storage_node = 
storage_model.get_node_by_pool_name(pool_1_name) self.assertEqual('host_2@backend_2', storage_node.host) # new pool was added pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) self.assertEqual(pool_1_name, pool_1.name) self.assertEqual(500, pool_1.total_capacity_gb) self.assertEqual(460, pool_1.free_capacity_gb) self.assertEqual(40, pool_1.allocated_capacity_gb) self.assertEqual(40, pool_1.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_create(self, m_cinder_helper): """test creating volume in existing pool and node""" # create storage_pool_by_name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='3', total_capacity_gb='500', free_capacity_gb='380', provisioned_capacity_gb='120', allocated_capacity_gb='120') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message('scenario_1_volume-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) self.assertFalse(volume_00.bootable) # check that capacity was updated pool_0_name = 'host_0@backend_0#pool_0' m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(3, pool_0.total_volumes) self.assertEqual(380, pool_0.free_capacity_gb) self.assertEqual(120, pool_0.allocated_capacity_gb) self.assertEqual(120, pool_0.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_bootable_volume_create(self, m_cinder_helper): """test creating bootable volume in existing pool and node""" # create storage_pool_by_name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='3', total_capacity_gb='500', free_capacity_gb='380', provisioned_capacity_gb='120', allocated_capacity_gb='120') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message('scenario_1_bootable-volume-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) self.assertTrue(volume_00.bootable) # check that capacity was updated pool_0_name = 'host_0@backend_0#pool_0' m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) 
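# The VolumeCreateEnd endpoint refreshes the pool statistics from the
# (mocked) CinderHelper, so the model is expected to now report the
# values configured on return_pool_mock above.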
self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(3, pool_0.total_volumes) self.assertEqual(380, pool_0.free_capacity_gb) self.assertEqual(120, pool_0.allocated_capacity_gb) self.assertEqual(120, pool_0.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_create_pool_notfound(self, m_cinder_helper): """check creating volume in not existing pool and node""" # get_storage_pool_by_name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_2@backend_2#pool_0', total_volumes='1', total_capacity_gb='500', free_capacity_gb='460', provisioned_capacity_gb='40', allocated_capacity_gb='40') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) # create storage_node_by_name mock return_node_mock = mock.Mock() return_node_mock.configure_mock( host='host_2@backend_2', zone='nova', state='up', status='enabled') m_get_storage_node_by_name = mock.Mock( side_effect=lambda name: return_node_mock) m_get_volume_type_by_backendname = mock.Mock( side_effect=lambda name: [mock.Mock('backend_2')]) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name, get_storage_node_by_name=m_get_storage_node_by_name, get_volume_type_by_backendname=m_get_volume_type_by_backendname) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message( 'scenario_1_volume-create_pool_notfound.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) # check that capacity was updated node_2_name = 'host_2@backend_2' pool_0_name = node_2_name + '#pool_0' pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(1, pool_0.total_volumes) self.assertEqual(460, pool_0.free_capacity_gb) self.assertEqual(40, pool_0.allocated_capacity_gb) self.assertEqual(40, pool_0.provisioned_capacity_gb) # check that node was added m_get_storage_node_by_name.assert_called_once_with(node_2_name) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_error_volume_unmapped(self, m_cinder_helper): """test creating error volume unmapped""" m_get_storage_pool_by_name = mock.Mock( side_effect=exception.PoolNotFound(name="TEST")) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message('scenario_1_error-volume-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # we do not call get_storage_pool_by_name m_get_storage_pool_by_name.assert_not_called() # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_update(self, m_cinder_helper): """test 
updating volume in existing pool and node""" storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeUpdateEnd(self.fake_cdmc) volume_0_name = faker_cluster_state.volume_uuid_mapping['volume_0'] volume_0 = storage_model.get_volume_by_uuid(volume_0_name) self.assertEqual('name_0', volume_0.name) # create storage_pool_by name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='2', total_capacity_gb='500', free_capacity_gb='420', provisioned_capacity_gb='80', allocated_capacity_gb='80') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) message = self.load_message('scenario_1_volume-update.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that name of volume_0 was updated in the model volume_0 = storage_model.get_volume_by_uuid(volume_0_name) self.assertEqual('name_01', volume_0.name) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_delete(self, m_cinder_helper): """test deleting volume""" # create storage_pool_by name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='1', total_capacity_gb='500', free_capacity_gb='460', provisioned_capacity_gb='40', allocated_capacity_gb='40') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeDeleteEnd(self.fake_cdmc) # volume exists before consuming volume_0_uuid = faker_cluster_state.volume_uuid_mapping['volume_0'] volume_0 = storage_model.get_volume_by_uuid(volume_0_uuid) self.assertEqual(volume_0_uuid, volume_0.uuid) message = self.load_message('scenario_1_volume-delete.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # volume does not exists after consuming self.assertRaises( exception.VolumeNotFound, storage_model.get_volume_by_uuid, volume_0_uuid) # check that capacity was updated pool_0_name = 'host_0@backend_0#pool_0' m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(1, pool_0.total_volumes) self.assertEqual(460, pool_0.free_capacity_gb) self.assertEqual(40, pool_0.allocated_capacity_gb) self.assertEqual(40, pool_0.provisioned_capacity_gb) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/test_notifications.py0000664000175000017500000000720300000000000032650 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from unittest import mock from oslo_serialization import jsonutils from watcher.common import context from watcher.common import service as watcher_service from watcher.decision_engine.model.notification import base from watcher.decision_engine.model.notification import filtering from watcher.tests import base as base_test from watcher.tests.decision_engine.model.notification import fake_managers class DummyManager(fake_managers.FakeManager): @property def notification_endpoints(self): return [DummyNotification(self.fake_cdmc)] class DummyNotification(base.NotificationEndpoint): @property def filter_rule(self): return filtering.NotificationFilter( publisher_id=r'.*', event_type=r'compute.dummy', payload={'data': {'nested': r'^T.*'}}, ) def info(self, ctxt, publisher_id, event_type, payload, metadata): pass class NotificationTestCase(base_test.TestCase): def load_message(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as json_file: json_data = jsonutils.load(json_file) return json_data class TestReceiveNotifications(NotificationTestCase): def setUp(self): super(TestReceiveNotifications, self).setUp() p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') m_from_dict = p_from_dict.start() m_from_dict.return_value = self.context self.addCleanup(p_from_dict.stop) @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') @mock.patch.object(DummyNotification, 'info') def test_receive_dummy_notification(self, m_info, m_heartbeat): message = { 'publisher_id': 'nova-compute', 'event_type': 'compute.dummy', 'payload': {'data': {'nested': 'TEST'}}, 'priority': 'INFO', } de_service = watcher_service.Service(DummyManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'nova-compute', 'compute.dummy', {'data': {'nested': 'TEST'}}, {'message_id': None, 'timestamp': None}) @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') @mock.patch.object(DummyNotification, 'info') def test_skip_unwanted_notification(self, m_info, m_heartbeat): message = { 'publisher_id': 'nova-compute', 'event_type': 'compute.dummy', 'payload': {'data': {'nested': 'unwanted'}}, 'priority': 'INFO', } de_service = watcher_service.Service(DummyManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) self.assertEqual(0, m_info.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/model/notification/test_nova_notifications.py0000664000175000017500000010464000000000000033676 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import os_resource_classes as orc from unittest import mock from oslo_serialization import jsonutils from watcher.common import context from watcher.common import exception from watcher.common import nova_helper from watcher.common import placement_helper from watcher.common import service as watcher_service from watcher.decision_engine.model import element from watcher.decision_engine.model.notification import nova as novanotification from watcher.tests import base as base_test from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.model.notification import fake_managers class NotificationTestCase(base_test.TestCase): @staticmethod def load_message(filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as json_file: json_data = jsonutils.load(json_file) return json_data class TestReceiveNovaNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} FAKE_NOTIFICATIONS = { 'instance.create.end': 'instance-create-end.json', 'instance.lock': 'instance-lock.json', 'instance.unlock': 'instance-unlock.json', 'instance.pause.end': 'instance-pause-end.json', 'instance.power_off.end': 'instance-power_off-end.json', 'instance.power_on.end': 'instance-power_on-end.json', 'instance.resize_confirm.end': 'instance-resize_confirm-end.json', 'instance.restore.end': 'instance-restore-end.json', 'instance.resume.end': 'instance-resume-end.json', 'instance.shelve.end': 'instance-shelve-end.json', 'instance.shutdown.end': 'instance-shutdown-end.json', 'instance.suspend.end': 'instance-suspend-end.json', 'instance.unpause.end': 'instance-unpause-end.json', 'instance.unrescue.end': 'instance-unrescue-end.json', 'instance.unshelve.end': 'instance-unshelve-end.json', 'instance.rebuild.end': 'instance-rebuild-end.json', 'instance.rescue.end': 'instance-rescue-end.json', 'instance.update': 'instance-update.json', 'instance.live_migration_force_complete.end': 'instance-live_migration_force_complete-end.json', 'instance.live_migration_post.end': 'instance-live_migration_post-end.json', 'instance.delete.end': 'instance-delete-end.json', 'instance.soft_delete.end': 'instance-soft_delete-end.json', 'service.create': 'service-create.json', 'service.delete': 'service-delete.json', 'service.update': 'service-update.json', } def setUp(self): super(TestReceiveNovaNotifications, self).setUp() p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') m_from_dict = p_from_dict.start() m_from_dict.return_value = self.context self.addCleanup(p_from_dict.stop) p_heartbeat = mock.patch.object( watcher_service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) @mock.patch.object(novanotification.VersionedNotification, 'info') def test_receive_nova_notifications(self, m_info): de_service = watcher_service.Service(fake_managers.FakeManager) n_dicts = novanotification.VersionedNotification.notification_mapping for n_type in n_dicts.keys(): n_json = self.FAKE_NOTIFICATIONS[n_type] 
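# Dispatch every fixture mapped above through the decision-engine
# service and check that the versioned endpoint's info() receives the
# payload unchanged.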
message = self.load_message(n_json) expected_message = message['payload'] publisher_id = message['publisher_id'] incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_with( self.context, publisher_id, n_type, expected_message, self.FAKE_METADATA) class TestNovaNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} def setUp(self): super(TestNovaNotifications, self).setUp() # fake cluster self.fake_cdmc = faker_cluster_state.FakerModelCollector() def test_nova_service_update(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) node0_name = "hostname_0" node0 = compute_model.get_node_by_name(node0_name) message = self.load_message('scenario3_service-update-disabled.json') self.assertEqual('hostname_0', node0.hostname) self.assertEqual(element.ServiceState.ONLINE.value, node0.state) self.assertEqual(element.ServiceState.ENABLED.value, node0.status) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual("hostname_0", node0.hostname) self.assertEqual(element.ServiceState.OFFLINE.value, node0.state) self.assertEqual(element.ServiceState.DISABLED.value, node0.status) message = self.load_message('scenario3_service-update-enabled.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual("hostname_0", node0.hostname) self.assertEqual(element.ServiceState.ONLINE.value, node0.state) self.assertEqual(element.ServiceState.ENABLED.value, node0.status) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, "NovaHelper") def test_nova_service_create(self, m_nova_helper_cls, m_placement_helper): mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper.return_value = mock_placement m_get_compute_node_by_hostname = mock.Mock( side_effect=lambda uuid: mock.Mock( name='m_get_compute_node_by_uuid', id="fafac544-906b-4a6a-a9c6-c1f7a8078c73", hypervisor_hostname="host2", state='up', status='enabled', memory_mb=7777, vcpus=42, free_disk_gb=974, local_gb=1337, service={'id': 123, 'host': 'host2', 'disabled_reason': ''},)) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) new_node_name = "host2" self.assertRaises( exception.ComputeNodeNotFound, compute_model.get_node_by_name, new_node_name) message = self.load_message('service-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) new_node = compute_model.get_node_by_name(new_node_name) self.assertEqual('host2', new_node.hostname) self.assertEqual(element.ServiceState.ONLINE.value, new_node.state) self.assertEqual(element.ServiceState.ENABLED.value, new_node.status) 
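# A service.delete notification is expected to remove the compute node
# from the cluster data model entirely, so the node lookup below must
# raise ComputeNodeNotFound once the fixture has been consumed.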
def test_nova_service_delete(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) node0_name = "hostname_0" # Before self.assertTrue(compute_model.get_node_by_name(node0_name)) message = self.load_message('service-delete.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # After self.assertRaises( exception.ComputeNodeNotFound, compute_model.get_node_by_name, node0_name) def test_nova_instance_update(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-update.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) def test_nova_instance_state_building(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) message = self.load_message('instance-update.json') message['payload']['nova_object.data']['state'] = 'building' handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # Assert that the instance state in the model is unchanged # since the 'building' state is ignored. 
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, "NovaHelper") def test_nova_instance_update_notfound_still_creates( self, m_nova_helper_cls, m_placement_helper): mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper.return_value = mock_placement m_get_compute_node_by_hostname = mock.Mock( side_effect=lambda uuid: mock.Mock( name='m_get_compute_node_by_hostname', id='669966bd-a45c-4e1c-9d57-3054899a3ec7', hypervisor_hostname="Node_2", state='up', status='enabled', memory_mb=7777, vcpus=42, free_disk_gb=974, local_gb=1337, service={'id': 123, 'host': 'Node_2', 'disabled_reason': ''},)) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' message = self.load_message('scenario3_notfound_instance-update.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) instance0 = compute_model.get_instance_by_uuid(instance0_uuid) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) self.assertEqual(1, instance0.vcpus) self.assertEqual(1, instance0.disk) self.assertEqual(512, instance0.memory) m_get_compute_node_by_hostname.assert_called_once_with('Node_2') node_2 = compute_model.get_node_by_name('Node_2') self.assertEqual(7777, node_2.memory) self.assertEqual(42, node_2.vcpus) self.assertEqual(1337, node_2.disk) @mock.patch.object(nova_helper, "NovaHelper") def test_instance_update_node_notfound_set_unmapped( self, m_nova_helper_cls): m_get_compute_node_by_hostname = mock.Mock( side_effect=exception.ComputeNodeNotFound(name="TEST")) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' message = self.load_message( 'scenario3_notfound_instance-update.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) instance0 = compute_model.get_instance_by_uuid(instance0_uuid) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) self.assertEqual(1, instance0.vcpus) self.assertEqual(1, instance0.disk) self.assertEqual(512, instance0.memory) m_get_compute_node_by_hostname.assert_any_call('Node_2') self.assertRaises( exception.ComputeNodeNotFound, compute_model.get_node_by_uuid, 'Node_2') @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_nova_instance_create(self, m_nova_helper_cls, m_placement_helper): mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, 
orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper.return_value = mock_placement m_get_compute_node_by_hostname = mock.Mock( side_effect=lambda uuid: mock.Mock( name='m_get_compute_node_by_hostname', id=3, hypervisor_hostname="compute", state='up', status='enabled', uuid=uuid, memory_mb=7777, vcpus=42, free_disk_gb=974, local_gb=1337, service={'id': 123, 'host': 'compute', 'disabled_reason': ''},)) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2' self.assertRaises( exception.InstanceNotFound, compute_model.get_instance_by_uuid, instance0_uuid) message = self.load_message('instance-create-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) hostname = message['payload']['nova_object.data']['host'] node = self.fake_cdmc.cluster_data_model.get_node_by_instance_uuid( instance0_uuid) self.assertEqual(hostname, node.hostname) m_get_compute_node_by_hostname.assert_called_once_with(hostname) instance0 = self.fake_cdmc.cluster_data_model.get_instance_by_uuid( instance0_uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) self.assertEqual(1, instance0.vcpus) self.assertEqual(1, instance0.disk) self.assertEqual(512, instance0.memory) def test_nova_instance_delete_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' # Before self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) message = self.load_message('instance-delete-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # After self.assertRaises( exception.InstanceNotFound, compute_model.get_instance_by_uuid, instance0_uuid) def test_nova_instance_soft_delete_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' # Before self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) message = self.load_message('instance-soft_delete-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # After self.assertRaises( exception.InstanceNotFound, compute_model.get_instance_by_uuid, instance0_uuid) def test_live_migrated_force_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) message = self.load_message( 'instance-live_migration_force_complete-end.json') handler.info( 
ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_live_migrated_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) message = self.load_message( 'instance-live_migration_post-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_lock(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-lock.json') self.assertFalse(instance0.locked) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertTrue(instance0.locked) message = self.load_message('instance-unlock.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertFalse(instance0.locked) def test_nova_instance_pause(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-pause-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) message = self.load_message('instance-unpause-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_power_on_off(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-power_off-end.json') 
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.STOPPED.value, instance0.state) message = self.load_message('instance-power_on-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_instance_rebuild_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", node.uuid) message = self.load_message('instance-rebuild-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('hostname_0', node.hostname) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_rescue(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-rescue-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.RESCUED.value, instance0.state) message = self.load_message('instance-unrescue-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_instance_resize_confirm_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) message = self.load_message( 'instance-resize_confirm-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_restore_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = 
novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-restore-end.json') instance0.state = element.InstanceState.ERROR.value self.assertEqual(element.InstanceState.ERROR.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_resume_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-resume-end.json') instance0.state = element.InstanceState.ERROR.value self.assertEqual(element.InstanceState.ERROR.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_shelve(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-shelve-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.SHELVED.value, instance0.state) message = self.load_message('instance-unshelve-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_shutdown_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-shutdown-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.STOPPED.value, instance0.state) def test_nova_instance_suspend_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-suspend-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, 
            publisher_id=message['publisher_id'],
            event_type=message['event_type'],
            payload=message['payload'],
            metadata=self.FAKE_METADATA,
        )
        self.assertEqual(
            element.InstanceState.SUSPENDED.value, instance0.state)

    def test_info_no_cdm(self):
        # Tests the case where a notification is received before any audit
        # has been performed (an audit is what creates the nova CDM).
        mock_collector = mock.Mock(cluster_data_model=None)
        handler = novanotification.VersionedNotification(mock_collector)
        payload = {
            'nova_object.data': {
                'uuid': '9966d6bd-a45c-4e1c-9d57-3054899a3ec7',
                'host': None
            }
        }
        with mock.patch.object(handler, 'update_instance') as update_instance:
            handler.info(mock.sentinel.ctxt, 'publisher_id',
                         'instance.update', payload, metadata={})
            # update_instance should not be called: no Instance object was
            # added to the CDM because the CDM does not exist yet.
            update_instance.assert_not_called()

    def test_fake_instance_create(self):
        self.fake_cdmc.cluster_data_model = mock.Mock()
        handler = novanotification.VersionedNotification(self.fake_cdmc)
        message = self.load_message('instance-create-end.json')

        # get_instance_by_uuid should not be called when creating an instance
        with mock.patch.object(self.fake_cdmc.cluster_data_model,
                               'get_instance_by_uuid') as mock_get:
            handler.info(
                ctxt=self.context,
                publisher_id=message['publisher_id'],
                event_type=message['event_type'],
                payload=message['payload'],
                metadata=self.FAKE_METADATA,
            )
            mock_get.assert_not_called()
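# Editor's note: the next file, test_element.py, relies on the testscenarios
# pattern: a class-level ``scenarios`` list of (name, attrs) pairs causes each
# test method to run once per scenario, with the attrs dict injected as
# instance attributes (here ``cls`` and ``data``). Below is a minimal,
# self-contained sketch of the mechanism, assuming the shared base.TestCase
# mixes in testscenarios.WithScenarios as these tests imply; the class and
# values are illustrative only.

import testscenarios
import testtools


class _ScenarioSketch(testscenarios.WithScenarios, testtools.TestCase):

    # Each entry yields one run of test_square, e.g. test_square(two)
    # with self.value == 2 and self.expected == 4.
    scenarios = [
        ('two', dict(value=2, expected=4)),
        ('three', dict(value=3, expected=9)),
    ]

    def test_square(self):
        self.assertEqual(self.expected, self.value ** 2)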
python_watcher-14.0.0/watcher/tests/decision_engine/model/test_element.py
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 b<>com
#
# Authors: Vincent FRANCOISE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from watcher.decision_engine.model import element
from watcher.tests import base


class TestElement(base.TestCase):

    scenarios = [
        ("ComputeNode_with_all_fields", dict(
            cls=element.Instance,
            data={
                'uuid': 'FAKE_UUID',
                'state': 'state',
                'hostname': 'hostname',
                'memory': 111,
                'vcpus': 222,
                'disk': 333,
            })),
        ("ComputeNode_with_some_fields", dict(
            cls=element.Instance,
            data={
                'uuid': 'FAKE_UUID',
                'state': 'state',
                'vcpus': 222,
                'disk': 333,
            })),
        ("Instance_with_all_fields", dict(
            cls=element.Instance,
            data={
                'uuid': 'FAKE_UUID',
                'state': 'state',
                'hostname': 'hostname',
                'name': 'name',
                'memory': 111,
                'vcpus': 222,
                'disk': 333,
            })),
        ("Instance_with_some_fields", dict(
            cls=element.Instance,
            data={
                'uuid': 'FAKE_UUID',
                'state': 'state',
                'vcpus': 222,
                'disk': 333,
            })),
    ]

    def test_as_xml_element(self):
        el = self.cls(**self.data)
        el.as_xml_element()


class TestStorageElement(base.TestCase):

    scenarios = [
        ("StorageNode_with_all_fields", dict(
            cls=element.StorageNode,
            data={
                'host': 'host@backend',
                'zone': 'zone',
                'status': 'enabled',
                'state': 'up',
                'volume_type': ['volume_type'],
            })),
        ("Pool_with_all_fields", dict(
            cls=element.Pool,
            data={
                'name': 'host@backend#pool',
                'total_volumes': 1,
                'total_capacity_gb': 500,
                'free_capacity_gb': 420,
                'provisioned_capacity_gb': 80,
                'allocated_capacity_gb': 80,
                'virtual_free': 420,
            })),
        ("Pool_without_virtual_free_fields", dict(
            cls=element.Pool,
            data={
                'name': 'host@backend#pool',
                'total_volumes': 1,
                'total_capacity_gb': 500,
                'free_capacity_gb': 420,
                'provisioned_capacity_gb': 80,
                'allocated_capacity_gb': 80,
            })),
        ("Volume_with_all_fields", dict(
            cls=element.Volume,
            data={
                'uuid': 'FAKE_UUID',
                'size': 1,
                'status': 'in-use',
                'attachments': '[{"key": "value"}]',
                'name': 'name',
                'multiattach': 'false',
                'snapshot_id': '',
                'project_id': '8ea272ec-52d2-475e-9151-0f3ed8c674d1',
                'metadata': '{"key": "value"}',
                'bootable': 'false',
                'human_id': 'human_id',
            })),
        ("Volume_without_bootable_fields", dict(
            cls=element.Volume,
            data={
                'uuid': 'FAKE_UUID',
                'size': 1,
                'status': 'in-use',
                'attachments': '[]',
                'name': 'name',
                'multiattach': 'false',
                'snapshot_id': '',
                'project_id': '777d7968-9b61-4cc0-844d-a95a6fc22d8c',
                'metadata': '{"key": "value"}',
                'human_id': 'human_id',
            })),
        ("Volume_without_human_id_fields", dict(
            cls=element.Volume,
            data={
                'uuid': 'FAKE_UUID',
                'size': 1,
                'status': 'in-use',
                'attachments': '[]',
                'name': 'name',
                'multiattach': 'false',
                'snapshot_id': '',
                'project_id': '2e65af64-1898-4cee-bfee-af3fc7f76d16',
                'metadata': '{"key": "value"}',
            })),
    ]

    def test_as_xml_element(self):
        el = self.cls(**self.data)
        el.as_xml_element()


class TestIronicElement(base.TestCase):

    scenarios = [
        ("IronicNode_with_all_fields", dict(
            cls=element.IronicNode,
            data={
                "uuid": 'FAKE_UUID',
                "power_state": 'up',
                "maintenance": "false",
                "maintenance_reason": "null",
                "extra": {"compute_node_id": 1}
            })),
        ("IronicNode_with_some_fields", dict(
            cls=element.IronicNode,
            data={
                "uuid": 'FAKE_UUID',
                "power_state": 'up',
                "maintenance": "false",
                "extra": {"compute_node_id": 1}
            })),
    ]

    def test_as_xml_element(self):
        el = self.cls(**self.data)
        el.as_xml_element()


python_watcher-14.0.0/watcher/tests/decision_engine/model/test_model.py
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in
compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from unittest import mock from oslo_utils import uuidutils from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state class TestModel(base.TestCase): def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return model_root.ModelRoot.from_xml(self.load_data(filename)) def test_model_structure(self): fake_cluster = faker_cluster_state.FakerModelCollector() model1 = fake_cluster.build_scenario_1() self.assertEqual(5, len(model1.get_all_compute_nodes())) self.assertEqual(35, len(model1.get_all_instances())) self.assertEqual(8, len(model1.edges())) expected_struct_str = self.load_data('scenario_1.xml') model2 = model_root.ModelRoot.from_xml(expected_struct_str) self.assertTrue(model_root.ModelRoot.is_isomorphic(model2, model1)) def test_build_model_from_xml(self): fake_cluster = faker_cluster_state.FakerModelCollector() expected_model = fake_cluster.generate_scenario_1() struct_str = self.load_data('scenario_1.xml') model = model_root.ModelRoot.from_xml(struct_str) self.assertEqual(expected_model.to_string(), model.to_string()) @mock.patch.object(model_root.ModelRoot, 'get_all_compute_nodes') @mock.patch.object(model_root.ModelRoot, 'get_node_instances') def test_get_model_to_list(self, mock_instances, mock_nodes): fake_compute_node = mock.MagicMock( uuid='fake_node_uuid', fields=['uuid']) fake_instance = mock.MagicMock( uuid='fake_instance_uuid', fields=['uuid']) mock_nodes.return_value = {'fake_node_uuid': fake_compute_node} mock_instances.return_value = [fake_instance] expected_keys = ['server_uuid', 'node_uuid'] result = model_root.ModelRoot().to_list() self.assertEqual(1, len(result)) result_keys = result[0].keys() self.assertEqual(sorted(expected_keys), sorted(result_keys)) # test compute node has no instance mock_instances.return_value = [] expected_keys = ['node_uuid'] result = model_root.ModelRoot().to_list() self.assertEqual(1, len(result)) result_keys = result[0].keys() self.assertEqual(expected_keys, list(result_keys)) def test_get_node_by_instance_uuid(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(uuid_)) uuid_ = "{0}".format(uuidutils.generate_uuid()) instance = element.Instance(id=1) instance.uuid = uuid_ model.add_instance(instance) self.assertEqual(instance, model.get_instance_by_uuid(uuid_)) model.map_instance(instance, node) self.assertEqual(node, model.get_node_by_instance_uuid(instance.uuid)) def test_add_node(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertEqual(node, 
model.get_node_by_uuid(uuid_)) def test_delete_node(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(uuid_)) model.remove_node(node) self.assertRaises(exception.ComputeNodeNotFound, model.get_node_by_uuid, uuid_) def test_get_all_compute_nodes(self): model = model_root.ModelRoot() for id_ in range(10): uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id_) node.uuid = uuid_ model.add_node(node) all_nodes = model.get_all_compute_nodes() for uuid_ in all_nodes: node = model.get_node_by_uuid(uuid_) model.assert_node(node) def test_set_get_state_nodes(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertIn(node.state, [el.value for el in element.ServiceState]) node = model.get_node_by_uuid(uuid_) node.state = element.ServiceState.OFFLINE.value self.assertIn(node.state, [el.value for el in element.ServiceState]) def test_get_node_by_name(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) name = 'test_node' node = element.ComputeNode() node.uuid = uuid_ node.hostname = name model.add_node(node) compute_node = model.get_node_by_name(name) model.assert_node(compute_node) self.assertEqual(name, compute_node['hostname']) self.assertEqual(uuid_, compute_node['uuid']) def test_node_from_name_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) name = 'test_node' node = element.ComputeNode() node.uuid = uuid_ node.hostname = name model.add_node(node) fake_name = 'fake_node' self.assertRaises(exception.ComputeNodeNotFound, model.get_node_by_name, fake_name) def test_node_from_uuid_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) uuid2 = "{0}".format(uuidutils.generate_uuid()) self.assertRaises(exception.ComputeNodeNotFound, model.get_node_by_uuid, uuid2) def test_remove_node_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) uuid2 = "{0}".format(uuidutils.generate_uuid()) node2 = element.ComputeNode(id=2) node2.uuid = uuid2 self.assertRaises(exception.ComputeNodeNotFound, model.remove_node, node2) def test_assert_node_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertRaises(exception.IllegalArgumentException, model.assert_node, "objet_qcq") def test_instance_from_uuid_raise(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() self.assertRaises(exception.InstanceNotFound, model.get_instance_by_uuid, "valeur_qcq") def test_assert_instance_raise(self): model = model_root.ModelRoot() self.assertRaises(exception.IllegalArgumentException, model.assert_instance, "valeur_qcq") def test_get_node_instances(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() node = element.ComputeNode(uuid="Node_0") instance0 = model.get_instance_by_uuid("INSTANCE_0") instance1 = model.get_instance_by_uuid("INSTANCE_1") instances = model.get_node_instances(node) self.assertEqual(2, len(instances)) 
self.assertIn(instance0, instances) self.assertIn(instance1, instances) def test_get_node_used_resources(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() node = element.ComputeNode(uuid="Node_0") resources_used = model.get_node_used_resources(node) self.assertEqual(20, resources_used.get('vcpu')) self.assertEqual(4, resources_used.get('memory')) self.assertEqual(40, resources_used.get('disk')) def test_get_node_free_resources(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() node = model.get_node_by_uuid("Node_0") resources_free = model.get_node_free_resources(node) self.assertEqual(20, resources_free.get('vcpu')) self.assertEqual(128, resources_free.get('memory')) self.assertEqual(210, resources_free.get('disk')) class TestStorageModel(base.TestCase): def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return model_root.StorageModelRoot.from_xml(self.load_data(filename)) def test_model_structure(self): fake_cluster = faker_cluster_state.FakerStorageModelCollector() model1 = fake_cluster.build_scenario_1() self.assertEqual(2, len(model1.get_all_storage_nodes())) self.assertEqual(9, len(model1.get_all_volumes())) self.assertEqual(12, len(model1.edges())) expected_struct_str = self.load_data('storage_scenario_1.xml') model2 = model_root.StorageModelRoot.from_xml(expected_struct_str) self.assertTrue( model_root.StorageModelRoot.is_isomorphic(model2, model1)) def test_build_model_from_xml(self): fake_cluster = faker_cluster_state.FakerStorageModelCollector() expected_model = fake_cluster.generate_scenario_1() struct_str = self.load_data('storage_scenario_1.xml') model = model_root.StorageModelRoot.from_xml(struct_str) self.assertEqual(expected_model.to_string(), model.to_string()) def test_assert_node_raise(self): model = model_root.StorageModelRoot() node = element.StorageNode(host="host@backend") model.add_node(node) self.assertRaises(exception.IllegalArgumentException, model.assert_node, "obj") def test_assert_pool_raise(self): model = model_root.StorageModelRoot() pool = element.Pool(name="host@backend#pool") model.add_pool(pool) self.assertRaises(exception.IllegalArgumentException, model.assert_pool, "obj") def test_assert_volume_raise(self): model = model_root.StorageModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertRaises(exception.IllegalArgumentException, model.assert_volume, "obj") def test_add_node(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) def test_add_pool(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) def test_remove_node(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) model.remove_node(node) self.assertRaises(exception.StorageNodeNotFound, model.get_node_by_name, hostname) def test_remove_pool(self): model = model_root.StorageModelRoot() pool_name 
= "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) model.remove_pool(pool) self.assertRaises(exception.PoolNotFound, model.get_pool_by_pool_name, pool_name) def test_map_unmap_pool(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) model.map_pool(pool, node) self.assertTrue(pool.name in model.predecessors(node.host)) model.unmap_pool(pool, node) self.assertFalse(pool.name in model.predecessors(node.host)) def test_add_volume(self): model = model_root.StorageModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) def test_remove_volume(self): model = model_root.StorageModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.remove_volume(volume) self.assertRaises(exception.VolumeNotFound, model.get_volume_by_uuid, uuid_) def test_map_unmap_volume(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.map_volume(volume, pool) self.assertTrue(volume.uuid in model.predecessors(pool.name)) model.unmap_volume(volume, pool) self.assertFalse(volume.uuid in model.predecessors(pool.name)) def test_get_all_storage_nodes(self): model = model_root.StorageModelRoot() for i in range(10): hostname = "host_{0}".format(i) node = element.StorageNode(host=hostname) model.add_node(node) all_nodes = model.get_all_storage_nodes() for hostname in all_nodes: node = model.get_node_by_name(hostname) model.assert_node(node) def test_get_all_volumes(self): model = model_root.StorageModelRoot() for id_ in range(10): uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) all_volumes = model.get_all_volumes() for vol in all_volumes: volume = model.get_volume_by_uuid(vol) model.assert_volume(volume) def test_get_node_pools(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) model.map_pool(pool, node) self.assertEqual([pool], model.get_node_pools(node)) def test_get_pool_by_volume(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.map_volume(volume, pool) self.assertEqual(pool, model.get_pool_by_volume(volume)) def 
test_get_pool_volumes(self):
        model = model_root.StorageModelRoot()
        pool_name = "host@backend#pool"
        pool = element.Pool(name=pool_name)
        model.add_pool(pool)
        self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
        uuid_ = "{0}".format(uuidutils.generate_uuid())
        volume = element.Volume(uuid=uuid_)
        model.add_volume(volume)
        self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
        model.map_volume(volume, pool)
        self.assertEqual([volume], model.get_pool_volumes(pool))


class TestBaremetalModel(base.TestCase):

    def load_data(self, filename):
        cwd = os.path.abspath(os.path.dirname(__file__))
        data_folder = os.path.join(cwd, "data")
        with open(os.path.join(data_folder, filename), 'rb') as xml_file:
            xml_data = xml_file.read()
        return xml_data

    def load_model(self, filename):
        return model_root.BaremetalModelRoot.from_xml(
            self.load_data(filename))

    def test_model_structure(self):
        fake_cluster = faker_cluster_state.FakerBaremetalModelCollector()
        model1 = fake_cluster.build_scenario_1()
        self.assertEqual(2, len(model1.get_all_ironic_nodes()))

        expected_struct_str = self.load_data('ironic_scenario_1.xml')
        model2 = model_root.BaremetalModelRoot.from_xml(expected_struct_str)
        self.assertTrue(
            model_root.BaremetalModelRoot.is_isomorphic(model2, model1))

    def test_build_model_from_xml(self):
        fake_cluster = faker_cluster_state.FakerBaremetalModelCollector()
        expected_model = fake_cluster.generate_scenario_1()
        struct_str = self.load_data('ironic_scenario_1.xml')
        model = model_root.BaremetalModelRoot.from_xml(struct_str)
        self.assertEqual(expected_model.to_string(), model.to_string())

    def test_assert_node_raise(self):
        model = model_root.BaremetalModelRoot()
        node_uuid = uuidutils.generate_uuid()
        node = element.IronicNode(uuid=node_uuid)
        model.add_node(node)
        self.assertRaises(exception.IllegalArgumentException,
                          model.assert_node, "obj")

    def test_add_node(self):
        model = model_root.BaremetalModelRoot()
        node_uuid = uuidutils.generate_uuid()
        node = element.IronicNode(uuid=node_uuid)
        model.add_node(node)
        self.assertEqual(node, model.get_node_by_uuid(node_uuid))

    def test_remove_node(self):
        model = model_root.BaremetalModelRoot()
        node_uuid = uuidutils.generate_uuid()
        node = element.IronicNode(uuid=node_uuid)
        model.add_node(node)
        self.assertEqual(node, model.get_node_by_uuid(node_uuid))
        model.remove_node(node)
        self.assertRaises(exception.IronicNodeNotFound,
                          model.get_node_by_uuid, node_uuid)

    def test_get_all_ironic_nodes(self):
        model = model_root.BaremetalModelRoot()
        for i in range(10):
            node_uuid = uuidutils.generate_uuid()
            node = element.IronicNode(uuid=node_uuid)
            model.add_node(node)
        all_nodes = model.get_all_ironic_nodes()
        for node_uuid in all_nodes:
            node = model.get_node_by_uuid(node_uuid)
            model.assert_node(node)


python_watcher-14.0.0/watcher/tests/decision_engine/planner/__init__.py
python_watcher-14.0.0/watcher/tests/decision_engine/planner/test_node_resource_consolidation.py
# -*- encoding: utf-8 -*-
#
Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine.planner import \ node_resource_consolidation as pbase from watcher.decision_engine.solution import default as dsol from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils class TestActionScheduling(base.DbTestCase): def setUp(self): super(TestActionScheduling, self).setUp() self.goal = db_utils.create_test_goal(name="server_consolidation") self.strategy = db_utils.create_test_strategy( name="node_resource_consolidation") self.audit = db_utils.create_test_audit( uuid=utils.generate_uuid(), strategy_id=self.strategy.id) self.planner = pbase.NodeResourceConsolidationPlanner(mock.Mock()) def test_schedule_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "host1", "destination_node": "host2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_action.call_count) filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) def test_schedule_two_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) server1_uuid = "b199db0c-1408-4d52-b5a5-5ca14de0ff36" server2_uuid = "b199db0c-1408-4d52-b5a5-5ca14de0ff37" solution.add_action(action_type="migrate", resource_id=server1_uuid, input_parameters={ "source_node": "host1", "destination_node": "host2", }) solution.add_action(action_type="migrate", resource_id=server2_uuid, input_parameters={ "source_node": "host1", "destination_node": "host3", }) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(2, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual( server1_uuid, actions[0]['input_parameters'].get('resource_id')) self.assertEqual( server2_uuid, actions[1]['input_parameters'].get('resource_id')) self.assertIn(actions[0]['uuid'], actions[1]['parents']) def test_schedule_actions_with_unknown_action(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "host1", "dst_uuid_node": 
"host2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: self.assertRaises( exception.UnsupportedActionType, self.planner.schedule, self.context, self.audit.id, solution) self.assertEqual(2, m_create_action.call_count) def test_schedule_migrate_change_state_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={"state": "disabled"}) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff37", input_parameters={"state": "disabled"}) solution.add_action(action_type="migrate", resource_id="f6416850-da28-4047-a547-8c49f53e95fe", input_parameters={"source_node": "host1"}) solution.add_action(action_type="migrate", resource_id="bb404e74-2caf-447b-bd1e-9234db386ca5", input_parameters={"source_node": "host2"}) solution.add_action(action_type="migrate", resource_id="f6416850-da28-4047-a547-8c49f53e95ff", input_parameters={"source_node": "host1"}) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={"state": "enabled"}) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff37", input_parameters={"state": "enabled"}) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(7, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("change_nova_service_state", actions[0].action_type) self.assertEqual("change_nova_service_state", actions[1].action_type) self.assertEqual("migrate", actions[2].action_type) self.assertEqual("migrate", actions[3].action_type) self.assertEqual("migrate", actions[4].action_type) self.assertEqual("change_nova_service_state", actions[5].action_type) self.assertEqual("change_nova_service_state", actions[6].action_type) action0_uuid = actions[0]['uuid'] action1_uuid = actions[1]['uuid'] action2_uuid = actions[2]['uuid'] action3_uuid = actions[3]['uuid'] action4_uuid = actions[4]['uuid'] action5_uuid = actions[5]['uuid'] action6_uuid = actions[6]['uuid'] # parents of action3,4,5 are action0,1 # resource2 and 4 have the same source, # so action about resource4 depends on # action about resource2 parents = [] for action in actions: if action.parents: parents.extend(action.parents) self.assertIn(action0_uuid, parents) self.assertIn(action1_uuid, parents) self.assertIn(action2_uuid, parents) self.assertIn(action3_uuid, parents) self.assertIn(action4_uuid, parents) self.assertNotIn(action5_uuid, parents) self.assertNotIn(action6_uuid, parents) class TestDefaultPlanner(base.DbTestCase): def setUp(self): super(TestDefaultPlanner, self).setUp() self.planner = pbase.NodeResourceConsolidationPlanner(mock.Mock()) self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy( self.context, 
            goal_id=self.goal.id)
        obj_utils.create_test_audit_template(
            self.context,
            goal_id=self.goal.id,
            strategy_id=self.strategy.id)

        p = mock.patch.object(db_api.BaseConnection, 'create_action_plan')
        self.mock_create_action_plan = p.start()
        self.mock_create_action_plan.side_effect = (
            self._simulate_action_plan_create)
        self.addCleanup(p.stop)

        q = mock.patch.object(db_api.BaseConnection, 'create_action')
        self.mock_create_action = q.start()
        self.mock_create_action.side_effect = (
            self._simulate_action_create)
        self.addCleanup(q.stop)

    def _simulate_action_plan_create(self, action_plan):
        action_plan.create()
        return action_plan

    def _simulate_action_create(self, action):
        action.create()
        return action

    @mock.patch.object(objects.Strategy, 'get_by_name')
    def test_scheduler_warning_empty_action_plan(self, m_get_by_name):
        m_get_by_name.return_value = self.strategy
        audit = db_utils.create_test_audit(
            goal_id=self.goal.id,
            strategy_id=self.strategy.id)
        fake_solution = mock.MagicMock(efficacy_indicators=[], actions=[])
        action_plan = self.planner.schedule(
            self.context, audit.id, fake_solution)
        self.assertIsNotNone(action_plan.uuid)


python_watcher-14.0.0/watcher/tests/decision_engine/planner/test_planner_manager.py
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg

from watcher.decision_engine.planner import manager as planner
from watcher.decision_engine.planner import weight
from watcher.tests import base


class TestPlannerManager(base.TestCase):

    def test_load(self):
        cfg.CONF.set_override('planner', "weight", group='watcher_planner')
        manager = planner.PlannerManager()
        selected_planner = cfg.CONF.watcher_planner.planner
        self.assertIsInstance(manager.load(selected_planner),
                              weight.WeightPlanner)


python_watcher-14.0.0/watcher/tests/decision_engine/planner/test_weight_planner.py
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
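# Editor's note: the TestActionScheduling cases below configure WeightPlanner
# with ``weights`` (ordering priority per action type, higher runs first, as
# the nop-before-migrate expectations show) and ``parallelization`` (how many
# actions of one type may run side by side). The helper below is a standalone
# sketch of that scheduling idea only, not the planner's actual algorithm;
# its name and signature are hypothetical.

import itertools


def sketch_schedule(actions, weights, parallelization):
    """Group action-type strings into ordered batches.

    Higher-weight action types come first; each batch of a given type is
    capped by that type's parallelization limit.
    """
    # Sort by descending weight, tie-breaking on the type name so that
    # identical types end up adjacent for the grouping step below.
    ordered = sorted(actions,
                     key=lambda a: (weights.get(a, 0), a),
                     reverse=True)
    batches = []
    for action_type, group in itertools.groupby(ordered):
        group = list(group)
        cap = max(1, parallelization.get(action_type, 1))
        for i in range(0, len(group), cap):
            batches.append(group[i:i + cap])
    return batches


# Example mirroring test_schedule_two_actions: with weights
# {'migrate': 3, 'nop': 5}, the nop runs first, then both migrates
# together (migrate parallelization is 2):
assert sketch_schedule(
    ['nop', 'migrate', 'migrate'],
    weights={'migrate': 3, 'nop': 5},
    parallelization={'migrate': 2, 'nop': 1},
) == [['nop'], ['migrate', 'migrate']]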
from unittest import mock from watcher.common import nova_helper from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine.planner import weight as pbase from watcher.decision_engine.solution import default as dsol from watcher.decision_engine.strategy import strategies from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils as db_utils from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.model import gnocchi_metrics as fake from watcher.tests.objects import utils as obj_utils class SolutionFaker(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon.compute_model = current_state_cluster.generate_scenario_1() sercon.gnocchi = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class SolutionFakerSingleHyp(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon.compute_model = ( current_state_cluster.generate_scenario_3_with_2_nodes()) sercon.gnocchi = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class TestActionScheduling(base.DbTestCase): def setUp(self): super(TestActionScheduling, self).setUp() self.goal = db_utils.create_test_goal(name="dummy") self.strategy = db_utils.create_test_strategy(name="dummy") self.audit = db_utils.create_test_audit( uuid=utils.generate_uuid(), strategy_id=self.strategy.id) self.planner = pbase.WeightPlanner( mock.Mock( weights={ 'turn_host_to_acpi_s3_state': 10, 'resize': 20, 'migrate': 30, 'sleep': 40, 'change_nova_service_state': 50, 'nop': 60, 'new_action_type': 70, }, parallelization={ 'turn_host_to_acpi_s3_state': 2, 'resize': 2, 'migrate': 2, 'sleep': 1, 'change_nova_service_state': 1, 'nop': 1, 'new_action_type': 70, })) @mock.patch.object(utils, "generate_uuid") def test_schedule_actions(self, m_generate_uuid): m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", "33333333-3333-3333-3333-333333333333", # "44444444-4444-4444-4444-444444444444", # "55555555-5555-5555-5555-555555555555", # "66666666-6666-6666-6666-666666666666", # "77777777-7777-7777-7777-777777777777", # "88888888-8888-8888-8888-888888888888", # "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) self.planner.config.weights = {'migrate': 3} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = [] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: 
pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_two_actions(self, m_generate_uuid): m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", "22222222-2222-2222-2222-222222222222", "33333333-3333-3333-3333-333333333333", "44444444-4444-4444-4444-444444444444", # Migrate 1 "55555555-5555-5555-5555-555555555555", # Nop 1 ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) # We create the migrate action before but we then schedule # after the nop action solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="nop", input_parameters={"message": "Hello world"}) self.planner.config.weights = {'migrate': 3, 'nop': 5} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'nop', 'parents': [], 'uuid': '55555555-5555-5555-5555-555555555555'}, {'action_type': 'migrate', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_actions_with_unknown_action(self, m_generate_uuid): m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # new_action_type "33333333-3333-3333-3333-333333333333", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "server1", "dst_uuid_node": "server2", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'new_action_type', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['22222222-2222-2222-2222-222222222222'], 'uuid': 
'11111111-1111-1111-1111-111111111111'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') def test_schedule_migrate_resize_actions(self, m_nova, m_generate_uuid): m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Migrate 4 "55555555-5555-5555-5555-555555555555", # Migrate 5 "66666666-6666-6666-6666-666666666666", # Resize 1 "77777777-7777-7777-7777-777777777777", # Resize 2 "88888888-8888-8888-8888-888888888888", # Nop "99999999-9999-9999-9999-999999999999", ] m_nova.return_value = 'server1' solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={"flavor": "x1"}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111'], 'uuid': '22222222-2222-2222-2222-222222222222'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_3_migrate_1_resize_1_acpi_actions_1_swimlane( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 1 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", 
resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111'], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111'], 'uuid': '22222222-2222-2222-2222-222222222222'}), ({'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': ['22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_migrate_resize_acpi_actions_2_swimlanes( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 2 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", 
resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_migrate_resize_acpi_actions_3_swimlanes( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 3 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, 
"create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_three_migrate_two_resize_actions( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 3 self.planner.config.parallelization["resize"] = 2 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="resize", resource_id="b189db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={'flavor': 'x1'}) with mock.patch.object( 
pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_5_migrate_2_resize_actions_for_2_swimlanes( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 2 self.planner.config.parallelization["resize"] = 2 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Migrate 4 "55555555-5555-5555-5555-555555555555", # Migrate 5 "66666666-6666-6666-6666-666666666666", # Resize 1 "77777777-7777-7777-7777-777777777777", # Resize 2 "88888888-8888-8888-8888-888888888888", # Nop "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", 
"destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server3", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server4", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server5", "destination_node": "server6"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x2'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="DOESNOTMATTER") with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}, {'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '66666666-6666-6666-6666-666666666666'}), ({'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}, {'action_type': 'resize', 
'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '77777777-7777-7777-7777-777777777777'}), ({'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '66666666-6666-6666-6666-666666666666'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['66666666-6666-6666-6666-666666666666', '77777777-7777-7777-7777-777777777777'], 'uuid': '88888888-8888-8888-8888-888888888888'}), ({'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '77777777-7777-7777-7777-777777777777'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['66666666-6666-6666-6666-666666666666', '77777777-7777-7777-7777-777777777777'], 'uuid': '88888888-8888-8888-8888-888888888888'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) class TestWeightPlanner(base.DbTestCase): def setUp(self): super(TestWeightPlanner, self).setUp() self.planner = pbase.WeightPlanner(mock.Mock()) self.planner.config.weights = { 'nop': 0, 'sleep': 1, 'change_nova_service_state': 2, 'migrate': 3 } self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy( self.context, goal_id=self.goal.id) obj_utils.create_test_audit_template( self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') self.mock_create_action_plan = p.start() self.mock_create_action_plan.side_effect = ( self._simulate_action_plan_create) self.addCleanup(p.stop) q = mock.patch.object(db_api.BaseConnection, 'create_action') self.mock_create_action = q.start() self.mock_create_action.side_effect = ( self._simulate_action_create) self.addCleanup(q.stop) def _simulate_action_plan_create(self, action_plan): action_plan.create() return action_plan def _simulate_action_create(self, action): action.create() return action @mock.patch.object(objects.Strategy, 'get_by_name') def test_scheduler_warning_empty_action_plan(self, m_get_by_name): m_get_by_name.return_value = self.strategy audit = db_utils.create_test_audit( goal_id=self.goal.id, strategy_id=self.strategy.id) fake_solution = mock.MagicMock(efficacy_indicators=[], actions=[]) action_plan = self.planner.schedule( self.context, audit.id, fake_solution) self.assertIsNotNone(action_plan.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py0000664000175000017500000003761400000000000034156 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
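# NOTE(editor): illustrative sketch only -- this is not part of the test
# suite and not the real WeightPlanner implementation. The weight-planner
# tests above rest on two rules: action types with a higher configured
# weight are scheduled first, and the per-type "parallelization" value caps
# how many actions of one type can run side by side in the same swimlane.
# A minimal, hypothetical model of that ordering:
def order_by_weight(actions, weights):
    """Sort actions so the highest-weight action types come first."""
    return sorted(actions,
                  key=lambda action: weights.get(action['action_type'], 0),
                  reverse=True)


def chunk_by_parallelism(actions, parallelization):
    """Split an ordered action list into batches capped per action type."""
    batches = []
    for action in actions:
        cap = parallelization.get(action['action_type'], 1)
        if (batches and len(batches[-1]) < cap
                and batches[-1][0]['action_type'] == action['action_type']):
            batches[-1].append(action)
        else:
            batches.append([action])
    return batches


# With weights {'migrate': 3, 'nop': 5}, order_by_weight() puts the nop
# action before the migrate action, matching the parent/child edge asserted
# in test_schedule_two_actions above.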
from unittest import mock from watcher.common import exception from watcher.common import nova_helper from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine.planner import workload_stabilization as pbase from watcher.decision_engine.solution import default as dsol from watcher.decision_engine.strategy import strategies from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils as db_utils from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.model import gnocchi_metrics as fake from watcher.tests.objects import utils as obj_utils class SolutionFaker(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon._compute_model = current_state_cluster.generate_scenario_1() sercon.gnocchi = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class SolutionFakerSingleHyp(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon._compute_model = ( current_state_cluster.generate_scenario_3_with_2_nodes()) sercon.gnocchi = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class TestActionScheduling(base.DbTestCase): def setUp(self): super(TestActionScheduling, self).setUp() self.goal = db_utils.create_test_goal(name="dummy") self.strategy = db_utils.create_test_strategy(name="dummy") self.audit = db_utils.create_test_audit( uuid=utils.generate_uuid(), strategy_id=self.strategy.id) self.planner = pbase.WorkloadStabilizationPlanner(mock.Mock()) self.nova_helper = nova_helper.NovaHelper(mock.Mock()) def test_schedule_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: self.planner.config.weights = {'migrate': 3} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_action.call_count) filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) def test_schedule_two_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="nop", input_parameters={"message": "Hello world"}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: self.planner.config.weights = {'migrate': 3, 'nop': 5} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(2, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = 
objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("nop", actions[0].action_type) self.assertEqual("migrate", actions[1].action_type) def test_schedule_actions_with_unknown_action(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "server1", "dst_uuid_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: self.planner.config.weights = {'migrate': 0} self.assertRaises(KeyError, self.planner.schedule, self.context, self.audit.id, solution) assert not m_nova.called self.assertEqual(2, m_create_action.call_count) def test_schedule_actions_with_unsupported_action(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "server1", "dst_uuid_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: self.planner.config.weights = { 'turn_host_to_acpi_s3_state': 0, 'resize': 1, 'migrate': 2, 'sleep': 3, 'change_nova_service_state': 4, 'nop': 5, 'new_action_type': 6} self.assertRaises(exception.UnsupportedActionType, self.planner.schedule, self.context, self.audit.id, solution) assert not m_nova.called self.assertEqual(2, m_create_action.call_count) @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') def test_schedule_migrate_resize_actions(self, mock_nova): mock_nova.return_value = 'server1' solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="resize", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={"flavor": "x1"}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: self.planner.config.weights = {'migrate': 3, 'resize': 2} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertEqual(1, m_nova.call_count) self.assertIsNotNone(action_plan.uuid) self.assertEqual(2, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) self.assertEqual("resize", actions[1].action_type) self.assertEqual(actions[0].uuid, actions[1].parents[0]) def test_schedule_migrate_resize_acpi_s3_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } parent_migration = "b199db0c-1408-4d52-b5a5-5ca14de0ff36" solution.add_action(action_type="migrate", 
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="resize", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="migrate", resource_id="f6416850-da28-4047-a547-8c49f53e95fe", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="bb404e74-2caf-447b-bd1e-9234db386ca5", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object( nova_helper, 'NovaHelper') as m_nova: m_nova().get_hostname.return_value = 'server1' m_nova().get_instance_by_uuid.return_value = ['uuid1'] self.planner.config.weights = { 'turn_host_to_acpi_s3_state': 0, 'resize': 1, 'migrate': 2, 'sleep': 3, 'change_nova_service_state': 4, 'nop': 5} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertEqual(3, m_nova.call_count) self.assertIsNotNone(action_plan.uuid) self.assertEqual(5, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) self.assertEqual("migrate", actions[1].action_type) self.assertEqual("migrate", actions[2].action_type) self.assertEqual("resize", actions[3].action_type) self.assertEqual("turn_host_to_acpi_s3_state", actions[4].action_type) for action in actions: if action.input_parameters['resource_id'] == parent_migration: parent_migration = action break self.assertEqual(parent_migration.uuid, actions[3].parents[0]) class TestDefaultPlanner(base.DbTestCase): def setUp(self): super(TestDefaultPlanner, self).setUp() self.planner = pbase.WorkloadStabilizationPlanner(mock.Mock()) self.planner.config.weights = { 'nop': 0, 'sleep': 1, 'change_nova_service_state': 2, 'migrate': 3 } self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy( self.context, goal_id=self.goal.id) obj_utils.create_test_audit_template( self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') self.mock_create_action_plan = p.start() self.mock_create_action_plan.side_effect = ( self._simulate_action_plan_create) self.addCleanup(p.stop) q = mock.patch.object(db_api.BaseConnection, 'create_action') self.mock_create_action = q.start() self.mock_create_action.side_effect = ( self._simulate_action_create) self.addCleanup(q.stop) def _simulate_action_plan_create(self, action_plan): action_plan.create() return action_plan def _simulate_action_create(self, action): action.create() return action @mock.patch.object(objects.Strategy, 'get_by_name') def test_scheduler_warning_empty_action_plan(self, m_get_by_name): m_get_by_name.return_value = self.strategy audit = db_utils.create_test_audit( goal_id=self.goal.id, strategy_id=self.strategy.id) fake_solution = mock.MagicMock(efficacy_indicators=[], actions=[]) action_plan = self.planner.schedule( self.context, audit.id, fake_solution) self.assertIsNotNone(action_plan.uuid) class TestActionValidator(base.DbTestCase): INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" def setUp(self): super(TestActionValidator, self).setUp() 
self.r_osc_cls = mock.Mock() self.r_helper_cls = mock.Mock() self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) self.r_helper_cls.return_value = self.r_helper r_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.r_helper_cls) r_nova_helper.start() self.addCleanup(r_nova_helper.stop) def test_resize_validate_parents(self): resize_object = pbase.ResizeActionValidator() action = {'uuid': 'fcec56cd-74c1-406b-a7c1-81ef9f0c1393', 'input_parameters': {'resource_id': self.INSTANCE_UUID}} resource_action_map = {self.INSTANCE_UUID: [ ('action_uuid', 'migrate')]} self.r_helper.get_hostname.return_value = 'server1' self.r_helper.get_instance_by_uuid.return_value = ['instance'] result = resize_object.validate_parents(resource_action_map, action) self.assertEqual('action_uuid', result[0]) def test_migrate_validate_parents(self): migrate_object = pbase.MigrationActionValidator() action = {'uuid': '712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'input_parameters': {'source_node': 'server1', 'resource_id': self.INSTANCE_UUID}} resource_action_map = {} expected_map = { '94ae2f92-b7fd-4da7-9e97-f13504ae98c4': [ ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')], 'server1': [ ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')]} migrate_object.validate_parents(resource_action_map, action) self.assertEqual(resource_action_map, expected_map) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/scope/0000775000175000017500000000000000000000000023707 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scope/__init__.py0000664000175000017500000000000000000000000026006 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scope/fake_scopes.py0000664000175000017500000000516200000000000026547 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
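# NOTE(editor): illustrative sketch only, not Watcher code. The action
# validator tests above build a "resource_action_map": for every resource a
# planned action touches (an instance UUID or a hostname), it records the
# (action_uuid, action_type) pairs already scheduled against it, and a later
# action derives its parents from those entries. A minimal, hypothetical
# version of that bookkeeping:
def record_action(resource_action_map, resource_ids, action_uuid,
                  action_type):
    """Remember that an action operates on each of the given resources."""
    for resource_id in resource_ids:
        resource_action_map.setdefault(resource_id, []).append(
            (action_uuid, action_type))


resource_action_map = {}
record_action(resource_action_map,
              ['94ae2f92-b7fd-4da7-9e97-f13504ae98c4', 'server1'],
              '712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')
# A later resize on that instance would pick the migrate action's UUID as a
# parent, which is exactly what test_resize_validate_parents asserts.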
from watcher.tests.decision_engine.model import faker_cluster_state vum = faker_cluster_state.volume_uuid_mapping fake_scope_1 = [{'compute': [{'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ3'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_6'}]}, ]}] } ] compute_scope = [{'compute': [{'host_aggregates': [{'id': '*'}]}, {'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ2'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [ {'name': 'Node_1'}, {'name': 'Node_2'}]} ]}] } ] fake_scope_2 = [{'storage': [{'availability_zones': [{'name': 'zone_0'}]}, {'exclude': [ {'volumes': [ {'uuid': vum['volume_1']}]}, {'storage_pools': [ {'name': 'host_0@backend_0#pool_1'}]} ]}] } ] fake_scope_3 = [{'compute': [{'host_aggregates': [{'id': '1'}]}, {'exclude': [] }] } ] baremetal_scope = [ {'baremetal': [ {'exclude': [ {'ironic_nodes': [ {'uuid': 'c5941348-5a87-4016-94d4-4f9e0ce2b87a'}, {'uuid': 'c5941348-5a87-4016-94d4-4f9e0ce2b87c'} ] } ] } ] } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scope/test_baremetal.py0000664000175000017500000000441600000000000027261 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 SBCloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
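# NOTE(editor): illustrative sketch only, not Watcher code. The fixtures in
# fake_scopes.py above all share one shape: a list holding a single
# {'<namespace>': [rules...]} dict, where each rule either selects resources
# (availability_zones, host_aggregates, ...) or carries an 'exclude' list.
# A hypothetical helper that walks that structure for the baremetal case:
def excluded_ironic_nodes(scope):
    """Collect the ironic node UUIDs excluded by a baremetal audit scope."""
    uuids = []
    for namespace in scope:                     # e.g. {'baremetal': [...]}
        for rule in namespace.get('baremetal', []):
            for exclusion in rule.get('exclude', []):
                for node in exclusion.get('ironic_nodes', []):
                    uuids.append(node['uuid'])
    return uuids


# Applied to fake_scopes.baremetal_scope, this returns the two node UUIDs
# that test_exclude_resources below expects to see excluded.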
# from unittest import mock from watcher.decision_engine.scope import baremetal from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.scope import fake_scopes class TestBaremetalScope(base.TestCase): def setUp(self): super(TestBaremetalScope, self).setUp() self.fake_cluster = faker_cluster_state.FakerBaremetalModelCollector() self.audit_scope = fake_scopes.baremetal_scope def test_exclude_all_ironic_nodes(self): cluster = self.fake_cluster.generate_scenario_1() baremetal.BaremetalScope( self.audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) self.assertEqual({}, cluster.get_all_ironic_nodes()) def test_exclude_resources(self): nodes_to_exclude = [] resources = fake_scopes.baremetal_scope[0]['baremetal'][0]['exclude'] baremetal.BaremetalScope( self.audit_scope, mock.Mock(), osc=mock.Mock()).exclude_resources( resources, nodes=nodes_to_exclude) self.assertEqual(sorted(nodes_to_exclude), sorted(['c5941348-5a87-4016-94d4-4f9e0ce2b87a', 'c5941348-5a87-4016-94d4-4f9e0ce2b87c'])) def test_remove_nodes_from_model(self): cluster = self.fake_cluster.generate_scenario_1() baremetal.BaremetalScope( self.audit_scope, mock.Mock(), osc=mock.Mock()).remove_nodes_from_model( ['c5941348-5a87-4016-94d4-4f9e0ce2b87a'], cluster) self.assertEqual(len(cluster.get_all_ironic_nodes()), 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scope/test_compute.py0000664000175000017500000003375300000000000027007 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
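# NOTE(editor): illustrative sketch only, not Watcher code. Several compute
# scope tests below pin down the wildcard rule: a '*' entry in
# host_aggregates or availability_zones must stand alone, and mixing it with
# explicit ids raises WildcardCharacterIsUsed. A minimal, hypothetical check
# expressing that rule:
def validate_wildcard(entries, key):
    """Reject scope entries that combine '*' with explicit values."""
    values = [entry[key] for entry in entries]
    if '*' in values and len(values) > 1:
        raise ValueError(
            "wildcard '*' cannot be combined with other %s values" % key)
    return values


validate_wildcard([{'id': '*'}], 'id')      # fine -> ['*']
# validate_wildcard([{'id': '*'}, {'id': 1}], 'id') would raise, mirroring
# test_aggregates_wildcard_with_other_ids below.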
# from jsonschema import validators from unittest import mock from watcher.api.controllers.v1 import audit_template from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.scope import compute from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.scope import fake_scopes class TestComputeScope(base.TestCase): def setUp(self): super(TestComputeScope, self).setUp() self.fake_cluster = faker_cluster_state.FakerModelCollector() @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_get_scoped_model_with_zones_and_instances(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() audit_scope = fake_scopes.fake_scope_1 mock_zone_list.return_value = [ mock.Mock(zone='AZ{0}'.format(i), host={'hostname_{0}'.format(i): {}}) for i in range(4)] model = compute.ComputeScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) # NOTE(adisky):INSTANCE_6 is not excluded from model it will be tagged # as 'exclude' TRUE, blueprint compute-cdm-include-all-instances expected_edges = [('INSTANCE_2', 'Node_1'), (u'INSTANCE_6', u'Node_3')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_get_scoped_model_without_scope(self, mock_zone_list): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).get_scoped_model(model) assert not mock_zone_list.called def test_remove_instance(self): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).remove_instance( model, model.get_instance_by_uuid('INSTANCE_2'), 'Node_1') expected_edges = [ ('INSTANCE_0', 'Node_0'), ('INSTANCE_1', 'Node_0'), ('INSTANCE_3', 'Node_2'), ('INSTANCE_4', 'Node_2'), ('INSTANCE_5', 'Node_2'), ('INSTANCE_6', 'Node_3'), ('INSTANCE_7', 'Node_4'), ] self.assertEqual(sorted(expected_edges), sorted(model.edges())) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_collect_aggregates(self, mock_aggregate): allowed_nodes = [] mock_aggregate.return_value = [ mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] compute.ComputeScope([{'host_aggregates': [{'id': 1}, {'id': 2}]}], mock.Mock(), osc=mock.Mock())._collect_aggregates( [{'id': 1}, {'id': 2}], allowed_nodes) self.assertEqual(['Node_1'], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_aggregates_wildcard_is_used(self, mock_aggregate): allowed_nodes = [] mock_aggregate.return_value = [ mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] compute.ComputeScope([{'host_aggregates': [{'id': '*'}]}], mock.Mock(), osc=mock.Mock())._collect_aggregates( [{'id': '*'}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_aggregates_wildcard_with_other_ids(self, mock_aggregate): allowed_nodes = [] mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] scope_handler = compute.ComputeScope( [{'host_aggregates': [{'id': '*'}, {'id': 1}]}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_aggregates, [{'id': '*'}, {'id': 1}], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_aggregates_with_names_and_ids(self, mock_aggregate): allowed_nodes = [] mock_collection = [mock.Mock(id=i, 
hosts=['Node_{0}'.format(i)]) for i in range(2)] mock_collection[0].name = 'HA_0' mock_collection[1].name = 'HA_1' mock_aggregate.return_value = mock_collection compute.ComputeScope([{'host_aggregates': [{'name': 'HA_1'}, {'id': 0}]}], mock.Mock(), osc=mock.Mock())._collect_aggregates( [{'name': 'HA_1'}, {'id': 0}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_collect_zones(self, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone="AZ{0}".format(i + 1), host={'Node_{0}'.format(2 * i): 1, 'Node_{0}'.format(2 * i + 1): 2}) for i in range(2)] compute.ComputeScope([{'availability_zones': [{'name': "AZ1"}]}], mock.Mock(), osc=mock.Mock())._collect_zones( [{'name': "AZ1"}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1'], sorted(allowed_nodes)) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_zones_wildcard_is_used(self, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone="AZ{0}".format(i + 1), host={'Node_{0}'.format(2 * i): 1, 'Node_{0}'.format(2 * i + 1): 2}) for i in range(2)] compute.ComputeScope([{'availability_zones': [{'name': "*"}]}], mock.Mock(), osc=mock.Mock())._collect_zones( [{'name': "*"}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], sorted(allowed_nodes)) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_zones_wildcard_with_other_ids(self, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone="AZ{0}".format(i + 1), host={'Node_{0}'.format(2 * i): 1, 'Node_{0}'.format(2 * i + 1): 2}) for i in range(2)] scope_handler = compute.ComputeScope( [{'availability_zones': [{'name': "*"}, {'name': 'AZ1'}]}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_zones, [{'name': "*"}, {'name': 'AZ1'}], allowed_nodes) def test_compute_schema(self): test_scope = fake_scopes.compute_scope validators.Draft4Validator( audit_template.AuditTemplatePostType._build_schema() ).validate(test_scope) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_exclude_resource(self, mock_aggregate): mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] mock_collection[0].name = 'HA_0' mock_collection[1].name = 'HA_1' mock_aggregate.return_value = mock_collection resources_to_exclude = [{'host_aggregates': [{'name': 'HA_1'}, {'id': 0}]}, {'instances': [{'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [{'name': 'Node_2'}, {'name': 'Node_3'}]}, {'instance_metadata': [{'optimize': True}, {'optimize1': False}]}, {'projects': [{'uuid': 'PROJECT_1'}, {'uuid': 'PROJECT_2'}]}] instances_to_exclude = [] nodes_to_exclude = [] instance_metadata = [] projects_to_exclude = [] compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).exclude_resources( resources_to_exclude, instances=instances_to_exclude, nodes=nodes_to_exclude, instance_metadata=instance_metadata, projects=projects_to_exclude) self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], sorted(nodes_to_exclude)) self.assertEqual(['INSTANCE_1', 'INSTANCE_2'], sorted(instances_to_exclude)) self.assertEqual([{'optimize': True}, {'optimize1': False}], instance_metadata) self.assertEqual(['PROJECT_1', 'PROJECT_2'], sorted(projects_to_exclude)) def test_exclude_instances_with_given_metadata(self): cluster = self.fake_cluster.generate_scenario_1() instance_metadata = [{'optimize': True}] 
instances_to_remove = set() compute.ComputeScope( [], mock.Mock(), osc=mock.Mock()).exclude_instances_with_given_metadata( instance_metadata, cluster, instances_to_remove) self.assertEqual(sorted(['INSTANCE_' + str(i) for i in range(35)]), sorted(instances_to_remove)) instance_metadata = [{'optimize': False}] instances_to_remove = set() compute.ComputeScope( [], mock.Mock(), osc=mock.Mock()).exclude_instances_with_given_metadata( instance_metadata, cluster, instances_to_remove) self.assertEqual(set(), instances_to_remove) def test_exclude_instances_with_given_project(self): cluster = self.fake_cluster.generate_scenario_1() instances_to_exclude = set() projects_to_exclude = ['26F03131-32CB-4697-9D61-9123F87A8147', '109F7909-0607-4712-B32C-5CC6D49D2F15'] compute.ComputeScope( [], mock.Mock(), osc=mock.Mock()).exclude_instances_with_given_project( projects_to_exclude, cluster, instances_to_exclude) self.assertEqual(['INSTANCE_1', 'INSTANCE_2'], sorted(instances_to_exclude)) def test_remove_nodes_from_model(self): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).remove_nodes_from_model( ['hostname_1', 'hostname_2'], model) expected_edges = [ ('INSTANCE_0', 'Node_0'), ('INSTANCE_1', 'Node_0'), ('INSTANCE_6', 'Node_3'), ('INSTANCE_7', 'Node_4')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) def test_update_exclude_instances_in_model(self): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).update_exclude_instance_in_model( ['INSTANCE_1', 'INSTANCE_2'], model) expected_edges = [ ('INSTANCE_0', 'Node_0'), ('INSTANCE_1', 'Node_0'), ('INSTANCE_2', 'Node_1'), ('INSTANCE_3', 'Node_2'), ('INSTANCE_4', 'Node_2'), ('INSTANCE_5', 'Node_2'), ('INSTANCE_6', 'Node_3'), ('INSTANCE_7', 'Node_4')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) self.assertFalse( model.get_instance_by_uuid('INSTANCE_0').watcher_exclude) self.assertTrue( model.get_instance_by_uuid('INSTANCE_1').watcher_exclude) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_get_scoped_model_with_hostaggregate_null( self, mock_list, mock_detail): cluster = self.fake_cluster.generate_scenario_1() audit_scope = fake_scopes.fake_scope_3 mock_list.return_value = [mock.Mock(id=i, name="HA_{0}".format(i)) for i in range(2)] model = compute.ComputeScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) self.assertEqual(0, len(model.edges())) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_get_scoped_model_with_multi_scopes(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() # includes compute and storage scope audit_scope = [] audit_scope.extend(fake_scopes.fake_scope_1) audit_scope.extend(fake_scopes.fake_scope_2) mock_zone_list.return_value = [ mock.Mock(zone='AZ{0}'.format(i), host={'hostname_{0}'.format(i): {}}) for i in range(4)] model = compute.ComputeScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) # NOTE(adisky):INSTANCE_6 is not excluded from model it will be tagged # as 'exclude' TRUE, blueprint compute-cdm-include-all-instances expected_edges = [('INSTANCE_2', 'Node_1'), (u'INSTANCE_6', u'Node_3')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/watcher/tests/decision_engine/scope/test_storage.py0000664000175000017500000002325500000000000026773 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from unittest import mock from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.scope import storage from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.scope import fake_scopes class TestStorageScope(base.TestCase): def setUp(self): super(TestStorageScope, self).setUp() self.fake_cluster = faker_cluster_state.FakerStorageModelCollector() @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_get_scoped_model_with_zones_pools_volumes(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() audit_scope = fake_scopes.fake_scope_2 mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] model = storage.StorageScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) expected_edges = [(faker_cluster_state.volume_uuid_mapping['volume_0'], 'host_0@backend_0#pool_0'), ('host_0@backend_0#pool_0', 'host_0@backend_0')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_get_scoped_model_without_scope(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() storage.StorageScope([], mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) assert not mock_zone_list.called @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_collect_zones(self, mock_zone_list): allowed_nodes = [] az_scope = [{'name': 'zone_1'}] mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] storage.StorageScope([{'availability_zones': az_scope}], mock.Mock(), osc=mock.Mock())._collect_zones( az_scope, allowed_nodes) self.assertEqual(['host_1@backend_1'], sorted(allowed_nodes)) # storage scope with az wildcard az_scope = [{'name': '*'}] del allowed_nodes[:] storage.StorageScope([{'availability_zones': az_scope}], mock.Mock(), osc=mock.Mock())._collect_zones( az_scope, allowed_nodes) self.assertEqual(['host_0@backend_0', 'host_1@backend_1'], sorted(allowed_nodes)) # storage scope with az wildcard and other az_scope = [{'name': '*'}, {'name': 'zone_0'}] del allowed_nodes[:] scope_handler = storage.StorageScope( [{'availability_zones': az_scope}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_zones, az_scope, allowed_nodes) @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') @mock.patch.object(cinder_helper.CinderHelper, 'get_volume_type_by_backendname') def test_collect_vtype(self, mock_vt_list, mock_zone_list): allowed_nodes = []
mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] def side_effect(arg): if arg == 'backend_0': return ['type_0'] else: return ['type_1'] mock_vt_list.side_effect = side_effect vt_scope = [{'name': 'type_1'}] storage.StorageScope([{'volume_types': vt_scope}], mock.Mock(), osc=mock.Mock())._collect_vtype( vt_scope, allowed_nodes) self.assertEqual(['host_1@backend_1'], sorted(allowed_nodes)) # storage scope with vt wildcard vt_scope = [{'name': '*'}] del allowed_nodes[:] storage.StorageScope([{'volume_types': vt_scope}], mock.Mock(), osc=mock.Mock())._collect_vtype( vt_scope, allowed_nodes) self.assertEqual(['host_0@backend_0', 'host_1@backend_1'], sorted(allowed_nodes)) # storage scope with vt wildcard and other vt_scope = [{'name': '*'}, {'name': 'type_0'}] del allowed_nodes[:] scope_handler = storage.StorageScope([{'volume_types': vt_scope}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_vtype, vt_scope, allowed_nodes) def test_exclude_resources(self): pools_to_exclude = [] projects_to_exclude = [] volumes_to_exclude = [] resources = [{'volumes': [{'uuid': 'VOLUME_1'}, {'uuid': 'VOLUME_2'}] }, {'storage_pools': [{'name': 'host_0@backend_0#pool_1'}, {'name': 'host_1@backend_1#pool_1'}] }, {'projects': [{'uuid': 'PROJECT_1'}, {'uuid': 'PROJECT_2'}, {'uuid': 'PROJECT_3'}] } ] storage.StorageScope(resources, mock.Mock(), osc=mock.Mock()).exclude_resources( resources, pools=pools_to_exclude, projects=projects_to_exclude, volumes=volumes_to_exclude) self.assertEqual(['VOLUME_1', 'VOLUME_2'], volumes_to_exclude) self.assertEqual(['PROJECT_1', 'PROJECT_2', 'PROJECT_3'], projects_to_exclude) self.assertEqual(['host_0@backend_0#pool_1', 'host_1@backend_1#pool_1'], pools_to_exclude) def test_exclude_volumes(self): cluster = self.fake_cluster.generate_scenario_1() exclude = [faker_cluster_state.volume_uuid_mapping['volume_0'], faker_cluster_state.volume_uuid_mapping['volume_3'], ] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).exclude_volumes(exclude, cluster) self.assertNotIn(exclude[0], cluster.get_all_volumes().keys()) self.assertNotIn(exclude[1], cluster.get_all_volumes().keys()) def test_exclude_pools(self): cluster = self.fake_cluster.generate_scenario_1() exclude = ['host_0@backend_0#pool_0'] node_name = (exclude[0].split('#'))[0] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).exclude_pools(exclude, cluster) node = cluster.get_node_by_name(node_name) self.assertNotIn(exclude, cluster.get_node_pools(node)) def test_exclude_projects(self): cluster = self.fake_cluster.generate_scenario_1() exclude = ['project_1', 'project_2'] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).exclude_projects(exclude, cluster) projects = [] volumes = cluster.get_all_volumes() for volume_id in volumes: volume = volumes.get(volume_id) projects.append(volume.get('project_id')) self.assertNotIn(exclude[0], projects) self.assertNotIn(exclude[1], projects) def test_remove_nodes_from_model(self): cluster = self.fake_cluster.generate_scenario_1() nodes_to_remove = ['host_0@backend_0'] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).remove_nodes_from_model( nodes_to_remove, cluster) self.assertEqual(['host_1@backend_1'], list(cluster.get_all_storage_nodes())) @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_get_scoped_model_with_multi_scopes(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() # includes 
storage and compute scope audit_scope = [] audit_scope.extend(fake_scopes.fake_scope_2) audit_scope.extend(fake_scopes.fake_scope_1) mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] model = storage.StorageScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) expected_edges = [(faker_cluster_state.volume_uuid_mapping['volume_0'], 'host_0@backend_0#pool_0'), ('host_0@backend_0#pool_0', 'host_0@backend_0')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/scoring/0000775000175000017500000000000000000000000024242 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scoring/__init__.py0000664000175000017500000000000000000000000026341 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scoring/test_dummy_scorer.py0000664000175000017500000000372200000000000030367 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
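# The assertions below lean on the scoring-engine wire format: features go
# into calculate_score() as a JSON-encoded list and the result comes back as
# a JSON-encoded list whose first element is the score. A minimal sketch of
# that round trip (the threshold and the meaning of the first feature are
# guesses read off the fixtures below, not DummyScorer's actual rules):
#
#     from oslo_serialization import jsonutils
#
#     def calculate_score(features_str):
#         features = jsonutils.loads(features_str)  # e.g. [85, 0, 0, ...]
#         workload = 1 if features[0] > 80 else 0   # illustrative rule only
#         return jsonutils.dumps([workload])
#
#     jsonutils.loads(calculate_score('[85, 0, 0]'))[0]  # -> 1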
from oslo_serialization import jsonutils from watcher.decision_engine.scoring import dummy_scorer from watcher.tests import base class TestDummyScorer(base.TestCase): def test_metadata(self): scorer = dummy_scorer.DummyScorer(config=None) self.assertEqual('dummy_scorer', scorer.get_name()) self.assertIn('Dummy', scorer.get_description()) metainfo = scorer.get_metainfo() self.assertIn('feature_columns', metainfo) self.assertIn('result_columns', metainfo) self.assertIn('workloads', metainfo) def test_calculate_score(self): scorer = dummy_scorer.DummyScorer(config=None) self._assert_result(scorer, 0, '[0, 0, 0, 0, 0, 0, 0, 0, 0]') self._assert_result(scorer, 0, '[50, 0, 0, 600, 0, 0, 0, 0, 0]') self._assert_result(scorer, 0, '[0, 0, 0, 0, 600, 0, 0, 0, 0]') self._assert_result(scorer, 1, '[85, 0, 0, 0, 0, 0, 0, 0, 0]') self._assert_result(scorer, 2, '[0, 0, 0, 1100, 1100, 0, 0, 0, 0]') self._assert_result(scorer, 3, '[0, 0, 0, 0, 0, 70000000, 70000000, 0, 0]') def _assert_result(self, scorer, expected, features): result_str = scorer.calculate_score(features) actual_result = jsonutils.loads(result_str)[0] self.assertEqual(expected, actual_result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py0000664000175000017500000000355500000000000032604 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
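# A scoring container is a single plugin that exposes several scoring
# engines at once: get_scoring_engine_list() hands back ready-made engine
# instances, each keeping the same JSON-in/JSON-out calculate_score()
# contract. The three dummy engines are expected to reduce the feature list
# with min, max and mean respectively; roughly (make_engine is an
# illustrative helper, not part of the container API):
#
#     from oslo_serialization import jsonutils
#
#     def make_engine(reduce_fn):
#         def calculate_score(features_str):
#             values = jsonutils.loads(features_str)
#             return jsonutils.dumps([reduce_fn(values)])
#         return calculate_score
#
#     make_engine(min)('[1.1, 2.2, 4, 8]')                           # '[1.1]'
#     make_engine(lambda v: float(sum(v)) / len(v))('[1, 2, 4, 8]')  # '[3.75]'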
from oslo_serialization import jsonutils from watcher.decision_engine.scoring import dummy_scoring_container from watcher.tests import base class TestDummyScoringContainer(base.TestCase): def test_get_scoring_engine_list(self): scorers = (dummy_scoring_container.DummyScoringContainer .get_scoring_engine_list()) self.assertEqual(3, len(scorers)) self.assertEqual('dummy_min_scorer', scorers[0].get_name()) self.assertEqual('dummy_max_scorer', scorers[1].get_name()) self.assertEqual('dummy_avg_scorer', scorers[2].get_name()) def test_scorers(self): scorers = (dummy_scoring_container.DummyScoringContainer .get_scoring_engine_list()) self._assert_result(scorers[0], 1.1, '[1.1, 2.2, 4, 8]') self._assert_result(scorers[1], 8, '[1.1, 2.2, 4, 8]') # float(1 + 2 + 4 + 8) / 4 = 15.0 / 4 = 3.75 self._assert_result(scorers[2], 3.75, '[1, 2, 4, 8]') def _assert_result(self, scorer, expected, features): result_str = scorer.calculate_score(features) actual_result = jsonutils.loads(result_str)[0] self.assertEqual(expected, actual_result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/scoring/test_scoring_factory.py0000664000175000017500000000346000000000000031051 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
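# Conceptually the factory is a name-keyed registry over every engine
# advertised by the loaded scoring plugins (standalone engines and container
# members alike), which is why an unknown name surfaces as a plain KeyError.
# A sketch of that lookup, assuming such a registry rather than the
# factory's real internals:
#
#     _engines = {e.get_name(): e for e in get_scoring_engine_list()}
#
#     def get_scoring_engine(name):
#         return _engines[name]   # KeyError for unknown engine names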
from watcher.decision_engine.scoring import scoring_factory from watcher.tests import base class TestScoringFactory(base.TestCase): def test_get_scoring_engine(self): scorer = scoring_factory.get_scoring_engine('dummy_scorer') self.assertEqual('dummy_scorer', scorer.get_name()) scorer = scoring_factory.get_scoring_engine('dummy_min_scorer') self.assertEqual('dummy_min_scorer', scorer.get_name()) scorer = scoring_factory.get_scoring_engine('dummy_max_scorer') self.assertEqual('dummy_max_scorer', scorer.get_name()) scorer = scoring_factory.get_scoring_engine('dummy_avg_scorer') self.assertEqual('dummy_avg_scorer', scorer.get_name()) self.assertRaises( KeyError, scoring_factory.get_scoring_engine, 'non_existing_scorer') def test_get_scoring_engine_list(self): scoring_engines = scoring_factory.get_scoring_engine_list() engine_names = {'dummy_scorer', 'dummy_min_scorer', 'dummy_max_scorer', 'dummy_avg_scorer'} for scorer in scoring_engines: self.assertIn(scorer.get_name(), engine_names) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/solution/0000775000175000017500000000000000000000000024452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/solution/__init__.py0000664000175000017500000000000000000000000026551 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/solution/test_default_solution.py0000664000175000017500000000515700000000000031453 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
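# The behaviour pinned down below: DefaultSolution.add_action() folds the
# resource_id into the stored input_parameters instead of keeping it as a
# separate field, so each action is persisted as one flat parameter dict.
# Sketch (uuid stands in for the literal UUID used in the tests):
#
#     solution.add_action(action_type="nop", resource_id=uuid,
#                         input_parameters={"source_node": "server1"})
#     solution.actions[0]['input_parameters']
#     # -> {"source_node": "server1", "resource_id": uuid}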
from unittest import mock from watcher.decision_engine.solution import default from watcher.decision_engine.strategy import strategies from watcher.tests import base class TestDefaultSolution(base.TestCase): def test_default_solution(self): solution = default.DefaultSolution( goal=mock.Mock(), strategy=strategies.DummyStrategy(config=mock.Mock())) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="nop", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) self.assertEqual(1, len(solution.actions)) expected_action_type = "nop" expected_parameters = { "source_node": "server1", "destination_node": "server2", "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" } self.assertEqual(expected_action_type, solution.actions[0].get('action_type')) self.assertEqual(expected_parameters, solution.actions[0].get('input_parameters')) self.assertEqual('weight', solution.strategy.planner) def test_default_solution_with_no_input_parameters(self): solution = default.DefaultSolution( goal=mock.Mock(), strategy=strategies.DummyStrategy(config=mock.Mock())) solution.add_action(action_type="nop", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36") self.assertEqual(1, len(solution.actions)) expected_action_type = "nop" expected_parameters = { "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" } self.assertEqual(expected_action_type, solution.actions[0].get('action_type')) self.assertEqual(expected_parameters, solution.actions[0].get('input_parameters')) self.assertEqual('weight', solution.strategy.planner) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/0000775000175000017500000000000000000000000024440 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/__init__.py0000664000175000017500000000000000000000000026537 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/context/0000775000175000017500000000000000000000000026124 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/context/__init__.py0000664000175000017500000000000000000000000030223 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/context/test_strategy_context.py0000664000175000017500000001024500000000000033145 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
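# The strategy context under test is the glue between audits and strategies:
# given an audit it asks the strategy selector for a strategy (by goal name
# and, when the audit pins one, by strategy name), runs it against the
# cluster data model, and returns the resulting Solution. Roughly (argument
# names are illustrative):
#
#     selector = DefaultStrategySelector(goal_name, strategy_name, osc=osc)
#     strategy = selector.select()
#     solution = strategy.execute()   # what execute_strategy() wraps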
from unittest import mock from watcher.common import utils from watcher.decision_engine.model.collector import manager from watcher.decision_engine.solution import default from watcher.decision_engine.strategy.context import default as d_strategy_ctx from watcher.decision_engine.strategy.selection import default as d_selector from watcher.decision_engine.strategy import strategies from watcher.tests.db import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class TestStrategyContext(base.DbTestCase): def setUp(self): super(TestStrategyContext, self).setUp() obj_utils.create_test_goal(self.context, id=1, name="DUMMY") audit_template = obj_utils.create_test_audit_template( self.context, uuid=utils.generate_uuid()) self.audit = obj_utils.create_test_audit( self.context, audit_template_id=audit_template.id) self.fake_cluster = faker_cluster_state.FakerModelCollector() p_model = mock.patch.object( strategies.DummyStrategy, "compute_model", new_callable=mock.PropertyMock) self.m_model = p_model.start() self.addCleanup(p_model.stop) self.m_model.return_value = self.fake_cluster.build_scenario_1() strategy_context = d_strategy_ctx.DefaultStrategyContext() @mock.patch.object(d_selector.DefaultStrategySelector, 'select') def test_execute_strategy(self, mock_call): mock_call.return_value = strategies.DummyStrategy( config=mock.Mock()) solution = self.strategy_context.execute_strategy( self.audit, self.context) self.assertIsInstance(solution, default.DefaultSolution) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", mock.Mock()) def test_execute_force_dummy(self): goal = obj_utils.create_test_goal( self.context, id=50, uuid=utils.generate_uuid(), name="my_goal") strategy = obj_utils.create_test_strategy( self.context, id=42, uuid=utils.generate_uuid(), name="dummy", goal_id=goal.id) audit = obj_utils.create_test_audit( self.context, id=2, name='My Audit {0}'.format(2), goal_id=goal.id, strategy_id=strategy.id, uuid=utils.generate_uuid(), ) solution = self.strategy_context.execute_strategy(audit, self.context) self.assertEqual(len(solution.actions), 3) @mock.patch.object(strategies.BasicConsolidation, "execute") @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", mock.Mock()) def test_execute_force_basic(self, mock_call): expected_strategy = "basic" mock_call.return_value = expected_strategy obj_utils.create_test_goal(self.context, id=50, uuid=utils.generate_uuid(), name="my_goal") strategy = obj_utils.create_test_strategy(self.context, id=42, uuid=utils.generate_uuid(), name=expected_strategy) audit = obj_utils.create_test_audit( self.context, id=2, name='My Audit {0}'.format(2), strategy_id=strategy.id, uuid=utils.generate_uuid(), ) solution = self.strategy_context.execute_strategy(audit, self.context) self.assertEqual(solution, expected_strategy) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/selector/0000775000175000017500000000000000000000000026260 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/selector/__init__.py0000664000175000017500000000000000000000000030357 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 
python_watcher-14.0.0/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py0000664000175000017500000000502200000000000033432 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.common import exception from watcher.decision_engine.loading import default as default_loader from watcher.decision_engine.strategy.selection import ( default as default_selector) from watcher.decision_engine.strategy import strategies from watcher.tests import base class TestStrategySelector(base.TestCase): @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') def test_select_with_strategy_name(self, m_load): expected_goal = 'dummy' expected_strategy = "dummy" strategy_selector = default_selector.DefaultStrategySelector( expected_goal, expected_strategy, osc=None) strategy_selector.select() m_load.assert_called_once_with(expected_strategy, osc=None) @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') def test_select_with_goal_name_only(self, m_list_available, m_load): m_list_available.return_value = {"dummy": strategies.DummyStrategy} expected_goal = 'dummy' expected_strategy = "dummy" strategy_selector = default_selector.DefaultStrategySelector( expected_goal, osc=None) strategy_selector.select() m_load.assert_called_once_with(expected_strategy, osc=None) def test_select_non_existing_strategy(self): strategy_selector = default_selector.DefaultStrategySelector( "dummy", "NOT_FOUND") self.assertRaises(exception.LoadingError, strategy_selector.select) @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') def test_select_no_available_strategy_for_goal(self, m_list_available): m_list_available.return_value = {} strategy_selector = default_selector.DefaultStrategySelector("dummy") self.assertRaises(exception.NoAvailableStrategyForGoal, strategy_selector.select) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6551352 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/0000775000175000017500000000000000000000000026612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/__init__.py0000664000175000017500000000000000000000000030711 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_actuator.py0000664000175000017500000000247600000000000032056 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestActuator(TestBaseStrategy): def setUp(self): super(TestActuator, self).setUp() self.strategy = strategies.Actuator(config=mock.Mock()) def test_actuator_strategy(self): fake_action = {"action_type": "TEST", "input_parameters": {"a": "b"}} self.strategy.input_parameters = utils.Struct( {"actions": [fake_action]}) solution = self.strategy.execute() self.assertEqual(1, len(solution.actions)) self.assertEqual([fake_action], solution.actions) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_base.py0000664000175000017500000001200600000000000031134 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
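# The setUp below uses the stock mock recipe for faking a read-only
# property: patch it with new_callable=mock.PropertyMock and set
# return_value on the started patcher (SomeStrategy and fake_model are
# placeholders):
#
#     patcher = mock.patch.object(SomeStrategy, "compute_model",
#                                 new_callable=mock.PropertyMock)
#     m_prop = patcher.start()
#     self.addCleanup(patcher.stop)
#     m_prop.return_value = fake_model  # attribute access now yields this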
from unittest import mock from watcher.common import exception from watcher.decision_engine.datasources import manager from watcher.decision_engine.model import model_root from watcher.decision_engine.strategy import strategies from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state class TestBaseStrategy(base.TestCase): def setUp(self): super(TestBaseStrategy, self).setUp() # fake cluster self.fake_c_cluster = faker_cluster_state.FakerModelCollector() p_c_model = mock.patch.object( strategies.BaseStrategy, "compute_model", new_callable=mock.PropertyMock) self.m_c_model = p_c_model.start() self.addCleanup(p_c_model.stop) p_audit_scope = mock.patch.object( strategies.BaseStrategy, "audit_scope", new_callable=mock.PropertyMock) self.m_audit_scope = p_audit_scope.start() self.addCleanup(p_audit_scope.stop) self.m_audit_scope.return_value = mock.Mock() self.m_c_model.return_value = model_root.ModelRoot() self.strategy = strategies.DummyStrategy(config=mock.Mock()) class TestBaseStrategyDatasource(TestBaseStrategy): def setUp(self): super(TestBaseStrategyDatasource, self).setUp() self.strategy = strategies.DummyStrategy( config=mock.Mock(datasources=None)) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_global_preference(self, m_conf, m_manager): """Test if the global preference is used""" m_conf.watcher_datasources.datasources = \ ['gnocchi', 'monasca'] # Make sure we access the property and not the underlying function. m_manager.return_value.get_backend.return_value = \ mock.NonCallableMock() # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=m_conf.watcher_datasources, osc=None) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_global_preference_reverse(self, m_conf, m_manager): """Test if the global preference is used with another order""" m_conf.watcher_datasources.datasources = \ ['monasca', 'gnocchi'] # Make sure we access the property and not the underlying function. 
m_manager.return_value.get_backend.return_value = \ mock.NonCallableMock() # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=m_conf.watcher_datasources, osc=None) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_strategy_preference_override(self, m_conf, m_manager): """Test if the global preference can be overridden""" datasources = mock.Mock(datasources=['gnocchi']) self.strategy = strategies.DummyStrategy( config=datasources) m_conf.watcher_datasources.datasources = \ ['monasca', 'gnocchi'] # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=datasources, osc=None) class TestBaseStrategyException(TestBaseStrategy): def setUp(self): super(TestBaseStrategyException, self).setUp() def test_exception_model(self): self.m_c_model.return_value = None self.assertRaises( exception.ClusterStateNotDefined, self.strategy.execute) def test_exception_stale_cdm(self): self.fake_c_cluster.set_cluster_data_model_as_stale() self.m_c_model.return_value = self.fake_c_cluster.cluster_data_model self.assertRaises( # TODO(Dantali0n) This should return ClusterStale, # improve set_cluster_data_model_as_stale(). exception.ClusterStateNotDefined, self.strategy.execute) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py0000664000175000017500000002311400000000000034232 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
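# The class below relies on the testscenarios-style scenarios attribute:
# each test method runs once per (label, attributes) tuple, with the dict's
# keys bound as instance attributes, which is where self.datasource and
# self.fake_datasource_cls in setUp come from. Schematically:
#
#     scenarios = [
#         ("Monasca", {"datasource": "monasca", "fake_datasource_cls": ...}),
#         ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": ...}),
#     ]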
# import collections import copy from unittest import mock from watcher.applier.loading import default from watcher.common import clients from watcher.decision_engine.model import model_root from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.model import monasca_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestBasicConsolidation(TestBaseStrategy): scenarios = [ ("Monasca", {"datasource": "monasca", "fake_datasource_cls": monasca_metrics.FakeMonascaMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestBasicConsolidation, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_osc = mock.patch.object( clients, "OpenStackClients") self.m_osc = p_osc.start() self.addCleanup(p_osc.stop) p_datasource = mock.patch.object( strategies.BasicConsolidation, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( get_host_cpu_usage=self.fake_metrics.get_usage_compute_node_cpu, get_instance_cpu_usage=self.fake_metrics. get_average_usage_instance_cpu ) self.strategy = strategies.BasicConsolidation( config=mock.Mock(datasource=self.datasource)) def test_cluster_size(self): size_cluster = len( self.fake_c_cluster.generate_scenario_1().get_all_compute_nodes()) size_cluster_assert = 5 self.assertEqual(size_cluster_assert, size_cluster) def test_basic_consolidation_score_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_1_score = 0.023333333333333317 self.assertEqual(node_1_score, self.strategy.calculate_score_node( model.get_node_by_uuid("Node_1"))) node_2_score = 0.26666666666666666 self.assertEqual(node_2_score, self.strategy.calculate_score_node( model.get_node_by_uuid("Node_2"))) node_0_score = 0.023333333333333317 self.assertEqual(node_0_score, self.strategy.calculate_score_node( model.get_node_by_uuid("Node_0"))) def test_basic_consolidation_score_instance(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_0_score = 0.023333333333333317 self.assertEqual( instance_0_score, self.strategy.calculate_score_instance(instance_0)) instance_1 = model.get_instance_by_uuid("INSTANCE_1") instance_1_score = 0.023333333333333317 self.assertEqual( instance_1_score, self.strategy.calculate_score_instance(instance_1)) instance_2 = model.get_instance_by_uuid("INSTANCE_2") instance_2_score = 0.033333333333333326 self.assertEqual( instance_2_score, self.strategy.calculate_score_instance(instance_2)) instance_6 = model.get_instance_by_uuid("INSTANCE_6") instance_6_score = 0.02666666666666669 self.assertEqual( instance_6_score, self.strategy.calculate_score_instance(instance_6)) instance_7 = model.get_instance_by_uuid("INSTANCE_7") instance_7_score = 0.013333333333333345 self.assertEqual( instance_7_score, self.strategy.calculate_score_instance(instance_7)) def test_basic_consolidation_score_instance_disk(self): model = self.fake_c_cluster.generate_scenario_5_with_instance_disk_0() self.m_c_model.return_value = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_0_score = 0.023333333333333355 self.assertEqual( instance_0_score,
self.strategy.calculate_score_instance(instance_0)) def test_basic_consolidation_weight(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") cores = 16 # 80 GB disk = 80 # mem 8 GB mem = 8 instance_0_weight_assert = 3.1999999999999997 self.assertEqual( instance_0_weight_assert, self.strategy.calculate_weight(instance_0, cores, disk, mem)) def test_check_migration(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model all_instances = model.get_all_instances() all_nodes = model.get_all_compute_nodes() instance0 = all_instances[list(all_instances.keys())[0]] node0 = all_nodes[list(all_nodes.keys())[0]] self.strategy.check_migration(node0, node0, instance0) def test_threshold(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model all_nodes = model.get_all_compute_nodes() node0 = all_nodes[list(all_nodes.keys())[0]] self.assertFalse(self.strategy.check_threshold( node0, 1000, 1000, 1000)) def test_basic_consolidation_works_on_model_copy(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = copy.deepcopy(model) self.assertTrue(model_root.ModelRoot.is_isomorphic( model, self.strategy.compute_model)) self.assertIsNot(model, self.strategy.compute_model) def test_basic_consolidation_migration(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) expected_num_migrations = 1 expected_power_state = 1 num_migrations = actions_counter.get("migrate", 0) num_node_state_change = actions_counter.get( "change_nova_service_state", 0) self.assertEqual(expected_num_migrations, num_migrations) self.assertEqual(expected_power_state, num_node_state_change) def test_basic_consolidation_execute_scenario_8_with_4_nodes(self): model = self.fake_c_cluster.generate_scenario_8_with_4_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) expected_num_migrations = 5 expected_power_state = 3 expected_global_efficacy = 75 num_migrations = actions_counter.get("migrate", 0) num_node_state_change = actions_counter.get( "change_nova_service_state", 0) global_efficacy_value = solution.global_efficacy[0].get('value', 0) self.assertEqual(expected_num_migrations, num_migrations) self.assertEqual(expected_power_state, num_node_state_change) self.assertEqual(expected_global_efficacy, global_efficacy_value) # calculate_weight def test_execute_no_workload(self): model = ( self.fake_c_cluster .generate_scenario_4_with_1_node_no_instance()) self.m_c_model.return_value = model with mock.patch.object( strategies.BasicConsolidation, 'calculate_weight' ) as mock_score_call: mock_score_call.return_value = 0 solution = self.strategy.execute() self.assertEqual(0, solution.efficacy.global_efficacy[0].get('value')) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() def
test_parameter_backwards_compat(self): # Set the deprecated node values to a non-default value self.strategy.input_parameters.update( {'aggregation_method': { "instance": "mean", "compute_node": "mean", "node": 'min'}}) # Pre execute method handles backwards compatibility of parameters self.strategy.pre_execute() # assert that the compute_node values are updated to those of node self.assertEqual( 'min', self.strategy.aggregation_method['compute_node']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py0000664000175000017500000000362000000000000033301 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestDummyStrategy(TestBaseStrategy): def setUp(self): super(TestDummyStrategy, self).setUp() self.strategy = strategies.DummyStrategy(config=mock.Mock()) def test_dummy_strategy(self): dummy = strategies.DummyStrategy(config=mock.Mock()) dummy.input_parameters = utils.Struct() dummy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) solution = dummy.execute() self.assertEqual(3, len(solution.actions)) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py0000664000175000017500000000363700000000000033777 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
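# As in the other strategy tests, test_check_parameters closes the loop with
# the applier: every action the strategy emits is re-loaded through the
# applier's DefaultActionLoader and its input parameters validated, so a
# solution only passes if the applier could actually execute it:
#
#     loader = default.DefaultActionLoader()
#     for action in solution.actions:
#         loaded_action = loader.load(action['action_type'])
#         loaded_action.input_parameters = action['input_parameters']
#         loaded_action.validate_parameters()  # schema check, raises on error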
from unittest import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestDummyWithScorer(TestBaseStrategy): def setUp(self): super(TestDummyWithScorer, self).setUp() self.strategy = strategies.DummyWithScorer(config=mock.Mock()) def test_dummy_with_scorer(self): dummy = strategies.DummyWithScorer(config=mock.Mock()) dummy.input_parameters = utils.Struct() dummy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) solution = dummy.execute() self.assertEqual(4, len(solution.actions)) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_host_maintenance.py0000664000175000017500000002405400000000000033547 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 chinac.com # # Authors: suzhengwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
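# The plans asserted below follow the host-maintenance recipe: enable the
# backup node first, then disable the node under maintenance (with
# disabled_reason 'watcher_maintaining' so operators can tell why), and
# finally live-migrate every instance off it. As an action sequence:
#
#     1. change_nova_service_state -> state: enabled (backup node)
#     2. change_nova_service_state -> state: disabled,
#        disabled_reason: watcher_maintaining (maintenance node)
#     3. migrate (migration_type: live), one per hosted instance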
# from unittest import mock from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestHostMaintenance(TestBaseStrategy): def setUp(self): super(TestHostMaintenance, self).setUp() self.strategy = strategies.HostMaintenance(config=mock.Mock()) def test_get_instance_state_str(self): mock_instance = mock.MagicMock(state="active") self.assertEqual("active", self.strategy.get_instance_state_str(mock_instance)) mock_instance.state = element.InstanceState("active") self.assertEqual("active", self.strategy.get_instance_state_str(mock_instance)) mock_instance.state = None self.assertRaises( exception.WatcherException, self.strategy.get_instance_state_str, mock_instance) def test_get_node_status_str(self): mock_node = mock.MagicMock(status="enabled") self.assertEqual("enabled", self.strategy.get_node_status_str(mock_node)) mock_node.status = element.ServiceState("enabled") self.assertEqual("enabled", self.strategy.get_node_status_str(mock_node)) mock_node.status = None self.assertRaises( exception.WatcherException, self.strategy.get_node_status_str, mock_node) def test_get_node_capacity(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid("Node_0") node_capacity = dict(cpu=40, ram=132, disk=250) self.assertEqual(node_capacity, self.strategy.get_node_capacity(node_0)) def test_host_fits(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid("Node_0") node_1 = model.get_node_by_uuid("Node_1") self.assertTrue(self.strategy.host_fits(node_0, node_1)) def test_add_action_enable_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') self.strategy.add_action_enable_compute_node(node_0) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'enabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_action_maintain_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') self.strategy.add_action_maintain_compute_node(node_0) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'disabled', 'disabled_reason': 'watcher_maintaining', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_instance_migration(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') node_1 = model.get_node_by_uuid('Node_1') instance_0 = model.get_instance_by_uuid("INSTANCE_0") self.strategy.instance_migration(instance_0, node_0, node_1) self.assertEqual(1, len(self.strategy.solution.actions)) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': node_1.uuid, 'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_0.uuid, 'resource_name': instance_0.name }}] self.assertEqual(expected, self.strategy.solution.actions) def test_instance_migration_without_dest_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = 
model.get_node_by_uuid('Node_0') instance_0 = model.get_instance_by_uuid("INSTANCE_0") self.strategy.instance_migration(instance_0, node_0) self.assertEqual(1, len(self.strategy.solution.actions)) expected = [{'action_type': 'migrate', 'input_parameters': {'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_0.uuid, 'resource_name': instance_0.name }}] self.assertEqual(expected, self.strategy.solution.actions) def test_host_migration(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') node_1 = model.get_node_by_uuid('Node_1') instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_1 = model.get_instance_by_uuid("INSTANCE_1") self.strategy.host_migration(node_0, node_1) self.assertEqual(2, len(self.strategy.solution.actions)) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': node_1.uuid, 'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_0.uuid, 'resource_name': instance_0.name }}, {'action_type': 'migrate', 'input_parameters': {'destination_node': node_1.uuid, 'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_1.uuid, 'resource_name': instance_1.name }}] self.assertIn(expected[0], self.strategy.solution.actions) self.assertIn(expected[1], self.strategy.solution.actions) def test_safe_maintain(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') node_1 = model.get_node_by_uuid('Node_1') self.assertFalse(self.strategy.safe_maintain(node_0)) self.assertFalse(self.strategy.safe_maintain(node_1)) model = self.fake_c_cluster.\ generate_scenario_1_with_all_nodes_disable() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') self.assertTrue(self.strategy.safe_maintain(node_0)) def test_try_maintain(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_1 = model.get_node_by_uuid('Node_1') self.strategy.try_maintain(node_1) self.assertEqual(2, len(self.strategy.solution.actions)) def test_exception_compute_node_not_found(self): self.m_c_model.return_value = self.fake_c_cluster.build_scenario_1() self.assertRaises(exception.ComputeNodeNotFound, self.strategy.execute) def test_strategy(self): model = self.fake_c_cluster. 
\ generate_scenario_9_with_3_active_plus_1_disabled_nodes() self.m_c_model.return_value = model node_2 = model.get_node_by_uuid('Node_2') node_3 = model.get_node_by_uuid('Node_3') instance_4 = model.get_instance_by_uuid("INSTANCE_4") result = self.strategy.pre_execute() self.assertIsNone(result) self.strategy.input_parameters = {"maintenance_node": 'hostname_2', "backup_node": 'hostname_3'} self.strategy.do_execute() expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': 'Node_3', 'resource_name': 'hostname_3', 'state': 'enabled'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': 'Node_2', 'resource_name': 'hostname_2', 'state': 'disabled', 'disabled_reason': 'watcher_maintaining'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': node_3.uuid, 'source_node': node_2.uuid, 'migration_type': 'live', 'resource_id': instance_4.uuid, 'resource_name': instance_4.name}}] self.assertEqual(expected, self.strategy.solution.actions) result = self.strategy.post_execute() self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolidation.py 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolida0000664000175000017500000003704200000000000034471 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
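# As exercised below, the strategy first partitions the cluster:
#
#     untouched, sources, dests = strategy.group_nodes(nodes)  # illustrative
#
# i.e. group_nodes() returns nodes to leave alone, source nodes to drain and
# destination nodes to pack (see the test_group_nodes_* cases). For
# CONTINUOUS audits it also consults previously FAILED migrate actions so a
# node whose evacuation already failed is not picked as a source again. With
# host_choice='auto' the migrate actions carry no destination_node (the
# scheduler decides); with 'specify' the strategy names the destination, and
# the plan brackets the migrations with change_nova_service_state actions
# (disable before, enable after).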
# from unittest import mock from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy import strategies from watcher import objects from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy from watcher.tests.objects import utils as obj_utils class TestNodeResourceConsolidation(TestBaseStrategy): def setUp(self): super(TestNodeResourceConsolidation, self).setUp() self.strategy = strategies.NodeResourceConsolidation( config=mock.Mock()) self.model = self.fake_c_cluster.generate_scenario_10() self.m_c_model.return_value = self.model self.strategy.input_parameters = {'host_choice': 'auto'} def test_pre_execute(self): planner = 'node_resource_consolidation' self.assertEqual('auto', self.strategy.host_choice) self.assertNotEqual(planner, self.strategy.planner) self.strategy.input_parameters.update( {'host_choice': 'specify'}) self.strategy.pre_execute() self.assertEqual(planner, self.strategy.planner) self.assertEqual('specify', self.strategy.host_choice) def test_check_resources(self): instance = [self.model.get_instance_by_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff")] dest = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") # test destination is null result = self.strategy.check_resources(instance, []) self.assertFalse(result) result = self.strategy.check_resources(instance, dest) self.assertTrue(result) self.assertEqual([], instance) def test_select_destination(self): instance0 = self.model.get_instance_by_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") source = self.model.get_node_by_instance_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") expected = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") # test destination is null result = self.strategy.select_destination(instance0, source, []) self.assertIsNone(result) nodes = list(self.model.get_all_compute_nodes().values()) nodes.remove(source) result = self.strategy.select_destination(instance0, source, nodes) self.assertEqual(expected, result) def test_add_migrate_actions_with_null(self): self.strategy.add_migrate_actions([], []) self.assertEqual([], self.strategy.solution.actions) self.strategy.add_migrate_actions(None, None) self.assertEqual([], self.strategy.solution.actions) def test_add_migrate_actions_with_auto(self): self.strategy.host_choice = 'auto' source = self.model.get_node_by_instance_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") nodes = list(self.model.get_all_compute_nodes().values()) nodes.remove(source) self.strategy.add_migrate_actions([source], nodes) expected = [{'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1', 'resource_name': 'INSTANCE_1', 'source_node': 'hostname_0'}}, {'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff', 'resource_name': 'INSTANCE_0', 'source_node': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_migrate_actions_with_specify(self): self.strategy.host_choice = 'specify' source = self.model.get_node_by_instance_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") nodes = list(self.model.get_all_compute_nodes().values()) nodes.remove(source) self.strategy.add_migrate_actions([source], nodes) expected = [{'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_1', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1', 'resource_name': 
'INSTANCE_1', 'source_node': 'hostname_0'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_2', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff', 'resource_name': 'INSTANCE_0', 'source_node': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_migrate_actions_with_no_action(self): self.strategy.host_choice = 'specify' source = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c971") dest = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") self.strategy.add_migrate_actions([source], [dest]) self.assertEqual([], self.strategy.solution.actions) def test_add_change_node_state_actions_with_exception(self): self.assertRaises(exception.IllegalArgumentException, self.strategy.add_change_node_state_actions, [], 'down') def test_add_change_node_state_actions(self): node1 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") node2 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c97f") # disable two nodes status = element.ServiceState.DISABLED.value result = self.strategy.add_change_node_state_actions( [node1, node2], status) self.assertEqual([node1, node2], result) expected = [{ 'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource ' 'consolidation strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c972', 'resource_name': 'hostname_2', 'state': 'disabled'}}, { 'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c97f', 'resource_name': 'hostname_0', 'state': 'disabled'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_change_node_state_actions_one_disabled(self): node1 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") node2 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c97f") # disable two nodes status = element.ServiceState.DISABLED.value # one enable, one disable node1.status = element.ServiceState.DISABLED.value result = self.strategy.add_change_node_state_actions( [node1, node2], status) self.assertEqual([node2], result) expected = [{ 'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c97f', 'resource_name': 'hostname_0', 'state': 'disabled'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_get_nodes_migrate_failed_return_null(self): self.strategy.audit = None result = self.strategy.get_nodes_migrate_failed() self.assertEqual([], result) self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.ONESHOT.value) result = self.strategy.get_nodes_migrate_failed() self.assertEqual([], result) @mock.patch.object(objects.action.Action, 'list') def test_get_nodes_migrate_failed(self, mock_list): self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.CONTINUOUS.value) fake_action = obj_utils.get_test_action( self.context, state=objects.action.State.FAILED, action_type='migrate', input_parameters={ 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1'}) mock_list.return_value = [fake_action] result = self.strategy.get_nodes_migrate_failed() expected = self.model.get_node_by_uuid( '89dce55c-8e74-4402-b23f-32aaf216c97f') self.assertEqual([expected], result) def test_group_nodes_with_ONESHOT(self): self.strategy.audit =
mock.Mock( audit_type=objects.audit.AuditType.ONESHOT.value) nodes = list(self.model.get_all_compute_nodes().values()) result = self.strategy.group_nodes(nodes) node0 = self.model.get_node_by_name('hostname_0') node1 = self.model.get_node_by_name('hostname_1') node2 = self.model.get_node_by_name('hostname_2') node3 = self.model.get_node_by_name('hostname_3') node4 = self.model.get_node_by_name('hostname_4') node5 = self.model.get_node_by_name('hostname_5') node6 = self.model.get_node_by_name('hostname_6') node7 = self.model.get_node_by_name('hostname_7') source_nodes = [node3, node4, node7] dest_nodes = [node2, node0, node1] self.assertIn(node5, result[0]) self.assertIn(node6, result[0]) self.assertEqual(source_nodes, result[1]) self.assertEqual(dest_nodes, result[2]) @mock.patch.object(objects.action.Action, 'list') def test_group_nodes_with_CONTINUOUS(self, mock_list): self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.CONTINUOUS.value) fake_action = obj_utils.get_test_action( self.context, state=objects.action.State.FAILED, action_type='migrate', input_parameters={ 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6'}) mock_list.return_value = [fake_action] nodes = list(self.model.get_all_compute_nodes().values()) result = self.strategy.group_nodes(nodes) node0 = self.model.get_node_by_name('hostname_0') node1 = self.model.get_node_by_name('hostname_1') node2 = self.model.get_node_by_name('hostname_2') node3 = self.model.get_node_by_name('hostname_3') node4 = self.model.get_node_by_name('hostname_4') node5 = self.model.get_node_by_name('hostname_5') node6 = self.model.get_node_by_name('hostname_6') node7 = self.model.get_node_by_name('hostname_7') source_nodes = [node4, node7] dest_nodes = [node3, node2, node0, node1] self.assertIn(node5, result[0]) self.assertIn(node6, result[0]) self.assertEqual(source_nodes, result[1]) self.assertEqual(dest_nodes, result[2]) @mock.patch.object(objects.action.Action, 'list') def test_execute_with_auto(self, mock_list): fake_action = obj_utils.get_test_action( self.context, state=objects.action.State.FAILED, action_type='migrate', input_parameters={ 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6'}) mock_list.return_value = [fake_action] mock_audit = mock.Mock( audit_type=objects.audit.AuditType.CONTINUOUS.value) self.strategy.host_choice = 'auto' self.strategy.do_execute(mock_audit) expected = [ {'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c975', 'resource_name': 'hostname_5', 'state': 'disabled'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c974', 'resource_name': 'hostname_4', 'state': 'disabled'}}, {'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f7', 'resource_name': 'INSTANCE_7', 'source_node': 'hostname_4'}}, {'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f8', 'resource_name': 'INSTANCE_8', 'source_node': 'hostname_7'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c975', 'resource_name': 'hostname_5', 'state': 'enabled'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': 
'89dce55c-8e74-4402-b23f-32aaf216c974', 'resource_name': 'hostname_4', 'state': 'enabled'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_execute_with_specify(self): mock_audit = mock.Mock( audit_type=objects.audit.AuditType.ONESHOT.value) self.strategy.host_choice = 'specify' self.strategy.do_execute(mock_audit) expected = [ {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_2', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6', 'resource_name': 'INSTANCE_6', 'source_node': 'hostname_3'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_0', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f7', 'resource_name': 'INSTANCE_7', 'source_node': 'hostname_4'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_1', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f8', 'resource_name': 'INSTANCE_8', 'source_node': 'hostname_7'}}] self.assertEqual(expected, self.strategy.solution.actions) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py0000664000175000017500000001243000000000000033241 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
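# The noisy-neighbor strategy under test keys off per-instance L3 cache
# usage (get_instance_l3_cache_usage, faked in setUp): group_hosts() yields
# the hosts holding both a 'priority_vm' and a 'noisy_vm', plus the
# candidate destination hosts, and the plan is to live-migrate the noisy
# instance to one of the quiet hosts, e.g.:
#
#     hosts_with_pairs, quiet_hosts = strategy.group_hosts()
#     dests = strategy.filter_dest_servers(quiet_hosts, noisy_instance)
#
# (noisy_instance is a placeholder). The cache_threshold and period input
# parameters gate what counts as noisy.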
import collections
from unittest import mock

from watcher.applier.loading import default
from watcher.common import utils
from watcher.decision_engine.strategy import strategies
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
    import TestBaseStrategy


class TestNoisyNeighbor(TestBaseStrategy):

    scenarios = [
        ("Gnocchi",
         {"datasource": "gnocchi",
          "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}),
    ]

    def setUp(self):
        super(TestNoisyNeighbor, self).setUp()

        # fake metrics
        self.f_metrics = self.fake_datasource_cls()

        p_datasource = mock.patch.object(
            strategies.NoisyNeighbor, "datasource_backend",
            new_callable=mock.PropertyMock)
        self.m_datasource = p_datasource.start()
        self.addCleanup(p_datasource.stop)
        self.m_datasource.return_value = mock.Mock(
            get_instance_l3_cache_usage=self.f_metrics.mock_get_statistics_nn)

        self.strategy = strategies.NoisyNeighbor(config=mock.Mock())

        self.strategy.input_parameters = utils.Struct()
        self.strategy.input_parameters.update({'cache_threshold': 35})
        self.strategy.threshold = 35
        self.strategy.input_parameters.update({'period': 100})
        self.strategy.period = 100

    def test_group_hosts(self):
        self.strategy.cache_threshold = 35
        self.strategy.period = 100
        model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
        self.m_c_model.return_value = model
        node_uuid = 'Node_1'
        n1, n2 = self.strategy.group_hosts()
        self.assertIn(node_uuid, n1)
        self.assertEqual(n1[node_uuid]['priority_vm'].uuid, 'INSTANCE_3')
        self.assertEqual(n1[node_uuid]['noisy_vm'].uuid, 'INSTANCE_4')
        self.assertEqual('Node_0', n2[0].uuid)

    def test_find_priority_instance(self):
        self.strategy.cache_threshold = 35
        self.strategy.period = 100
        model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
        self.m_c_model.return_value = model
        potential_prio_inst = model.get_instance_by_uuid('INSTANCE_3')
        inst_res = self.strategy.find_priority_instance(potential_prio_inst)
        self.assertEqual('INSTANCE_3', inst_res.uuid)

    def test_find_noisy_instance(self):
        self.strategy.cache_threshold = 35
        self.strategy.period = 100
        model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
        self.m_c_model.return_value = model
        potential_noisy_inst = model.get_instance_by_uuid('INSTANCE_4')
        inst_res = self.strategy.find_noisy_instance(potential_noisy_inst)
        self.assertEqual('INSTANCE_4', inst_res.uuid)

    def test_filter_destination_hosts(self):
        model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
        self.m_c_model.return_value = model
        self.strategy.cache_threshold = 35
        self.strategy.period = 100
        n1, n2 = self.strategy.group_hosts()
        mig_source_node = max(n1.keys(),
                              key=lambda a: n1[a]['priority_vm'])
        instance_to_mig = n1[mig_source_node]['noisy_vm']
        dest_hosts = self.strategy.filter_dest_servers(
            n2, instance_to_mig)
        self.assertEqual(1, len(dest_hosts))
        self.assertEqual('Node_0', dest_hosts[0].uuid)

    def test_execute_no_workload(self):
        self.strategy.cache_threshold = 35
        self.strategy.period = 100
        model = self.fake_c_cluster.\
            generate_scenario_4_with_1_node_no_instance()
        self.m_c_model.return_value = model
        solution = self.strategy.execute()
        self.assertEqual([], solution.actions)

    def test_execute(self):
        self.strategy.cache_threshold = 35
        self.strategy.period = 100
        model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
        self.m_c_model.return_value = model
        solution = self.strategy.execute()
        actions_counter = collections.Counter(
            [action.get('action_type') for action in solution.actions])
        num_migrations = actions_counter.get("migrate", 0)
self.assertEqual(1, num_migrations) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py0000664000175000017500000001113100000000000034321 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 Intel Corp # # Authors: Zhenzan Zhou # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections from unittest import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestOutletTempControl(TestBaseStrategy): scenarios = [ ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestOutletTempControl, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.OutletTempControl, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics, NAME=self.fake_metrics.NAME) self.strategy = strategies.OutletTempControl( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'threshold': 34.3}) self.strategy.threshold = 34.3 def test_group_hosts_by_outlet_temp(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model n1, n2 = self.strategy.group_hosts_by_outlet_temp() self.assertEqual("af69c544-906b-4a6a-a9c6-c1f7a8078c73", n1[0]['compute_node'].uuid) self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", n2[0]['compute_node'].uuid) def test_choose_instance_to_migrate(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model n1, n2 = self.strategy.group_hosts_by_outlet_temp() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) self.assertEqual('af69c544-906b-4a6a-a9c6-c1f7a8078c73', instance_to_mig[0].uuid) self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517', instance_to_mig[1].uuid) def test_filter_dest_servers(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model n1, n2 = self.strategy.group_hosts_by_outlet_temp() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) 
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1]) self.assertEqual(1, len(dest_hosts)) self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", dest_hosts[0]['compute_node'].uuid) def test_execute_no_workload(self): model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(1, num_migrations) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_saving_energy.py0000664000175000017500000001460000000000000033064 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
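# The SavingEnergy tests drive the strategy through a mocked metal helper:
# list_compute_nodes() is stubbed with fake baremetal nodes whose
# power_state and running_vms values decide which of the three node pools
# (with_vms_node_pool, free_poweron_node_pool, free_poweroff_node_pool)
# each node lands in.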
# from unittest import mock from watcher.common import clients from watcher.common.metal_helper import constants as m_constants from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine import fake_metal_helper from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestSavingEnergy(TestBaseStrategy): def setUp(self): super(TestSavingEnergy, self).setUp() self.fake_nodes = [fake_metal_helper.get_mock_metal_node(), fake_metal_helper.get_mock_metal_node()] self._metal_helper = mock.Mock() self._metal_helper.list_compute_nodes.return_value = self.fake_nodes p_nova = mock.patch.object(clients.OpenStackClients, 'nova') self.m_nova = p_nova.start() self.addCleanup(p_nova.stop) self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() self.strategy = strategies.SavingEnergy( config=mock.Mock()) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update( {'free_used_percent': 10.0, 'min_free_hosts_num': 1}) self.strategy.free_used_percent = 10.0 self.strategy.min_free_hosts_num = 1 self.strategy._metal_helper = self._metal_helper self.strategy._nova_client = self.m_nova def test_get_hosts_pool_with_vms_node_pool(self): self._metal_helper.list_compute_nodes.return_value = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON, hostname='hostname_0', running_vms=2), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF, hostname='hostname_1', running_vms=2), ] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 2) self.assertEqual(len(self.strategy.free_poweron_node_pool), 0) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 0) def test_get_hosts_pool_free_poweron_node_pool(self): self._metal_helper.list_compute_nodes.return_value = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON, hostname='hostname_0', running_vms=0), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON, hostname='hostname_1', running_vms=0), ] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 0) self.assertEqual(len(self.strategy.free_poweron_node_pool), 2) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 0) def test_get_hosts_pool_free_poweroff_node_pool(self): self._metal_helper.list_compute_nodes.return_value = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF, hostname='hostname_0', running_vms=0), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF, hostname='hostname_1', running_vms=0), ] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 0) self.assertEqual(len(self.strategy.free_poweron_node_pool), 0) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 2) def test_get_hosts_pool_with_node_out_model(self): self._metal_helper.list_compute_nodes.return_value = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF, hostname='hostname_0', running_vms=0), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.OFF, hostname='hostname_10', running_vms=0), ] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 0) self.assertEqual(len(self.strategy.free_poweron_node_pool), 0) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 1) def test_save_energy_poweron(self): self.strategy.free_poweroff_node_pool = [ 
fake_metal_helper.get_mock_metal_node(), fake_metal_helper.get_mock_metal_node(), ] self.strategy.save_energy() self.assertEqual(len(self.strategy.solution.actions), 1) action = self.strategy.solution.actions[0] self.assertEqual(action.get('input_parameters').get('state'), 'on') def test_save_energy_poweroff(self): self.strategy.free_poweron_node_pool = [ mock.Mock(uuid='922d4762-0bc5-4b30-9cb9-48ab644dd861'), mock.Mock(uuid='922d4762-0bc5-4b30-9cb9-48ab644dd862') ] self.strategy.save_energy() self.assertEqual(len(self.strategy.solution.actions), 1) action = self.strategy.solution.actions[0] self.assertEqual(action.get('input_parameters').get('state'), 'off') def test_execute(self): self._metal_helper.list_compute_nodes.return_value = [ fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON, hostname='hostname_0', running_vms=0), fake_metal_helper.get_mock_metal_node( power_state=m_constants.PowerState.ON, hostname='hostname_1', running_vms=0), ] model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual(len(solution.actions), 1) ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_storage_capacity_balance.py 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_storage_capacity_balanc0000664000175000017500000002231300000000000034416 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Authors: Canwei Li # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
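# The storage capacity balance tests build an entirely fake Cinder view:
# pools, volumes, snapshots and volume types are MagicMocks wired into a
# CinderHelper whose list calls are stubbed, so the strategy's grouping and
# migrate/retype decisions can be asserted without a real Cinder endpoint.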
# from unittest import mock from watcher.common import cinder_helper from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestStorageCapacityBalance(TestBaseStrategy): def setUp(self): super(TestStorageCapacityBalance, self).setUp() def test_fake_pool(name, free, total, allocated): fake_pool = mock.MagicMock() fake_pool.name = name fake_pool.pool_name = name.split('#')[1] fake_pool.volume_backend_name = name.split('#')[1] fake_pool.free_capacity_gb = free fake_pool.total_capacity_gb = total fake_pool.allocated_capacity_gb = allocated fake_pool.max_over_subscription_ratio = 1.0 return fake_pool self.fake_pool1 = test_fake_pool('host1@IPSAN-1#pool1', '60', '100', '90') self.fake_pool2 = test_fake_pool('host1@IPSAN-1#pool2', '20', '100', '80') self.fake_pool3 = test_fake_pool('host1@IPSAN-1#local_vstorage', '20', '100', '80') self.fake_pools = [self.fake_pool1, self.fake_pool2, self.fake_pool3] def test_fake_vol(id, name, size, status, bootable, migration_status=None, volume_type=None): fake_vol = mock.MagicMock() fake_vol.id = id fake_vol.name = name fake_vol.size = size fake_vol.status = status fake_vol.bootable = bootable fake_vol.migration_status = migration_status fake_vol.volume_type = volume_type setattr(fake_vol, 'os-vol-host-attr:host', 'host1@IPSAN-1#pool2') return fake_vol self.fake_vol1 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd861', 'test_volume1', 4, 'available', 'true', 'success', volume_type='type2') self.fake_vol2 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd862', 'test_volume2', 10, 'in-use', 'false') self.fake_vol3 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd863', 'test_volume3', 4, 'in-use', 'true', volume_type='type2') self.fake_vol4 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd864', 'test_volume4', 10, 'error', 'true') self.fake_vol5 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd865', 'test_volume5', 15, 'in-use', 'true') self.fake_volumes = [self.fake_vol1, self.fake_vol2, self.fake_vol3, self.fake_vol4, self.fake_vol5] def test_fake_snap(vol_id): fake_snap = mock.MagicMock() fake_snap.volume_id = vol_id return fake_snap self.fake_snap = [test_fake_snap( '922d4762-0bc5-4b30-9cb9-48ab644dd865')] def test_fake_volume_type(type_name, extra_specs): fake_type = mock.MagicMock() fake_type.name = type_name fake_type.extra_specs = extra_specs return fake_type self.fake_types = [test_fake_volume_type( 'type1', {'volume_backend_name': 'pool1'}), test_fake_volume_type( 'type2', {'volume_backend_name': 'pool2'}) ] self.fake_c_cluster = faker_cluster_state.FakerStorageModelCollector() osc = clients.OpenStackClients() p_cinder = mock.patch.object(osc, 'cinder') p_cinder.start() self.addCleanup(p_cinder.stop) self.m_cinder = cinder_helper.CinderHelper(osc=osc) self.m_cinder.get_storage_pool_list = mock.Mock( return_value=self.fake_pools) self.m_cinder.get_volume_list = mock.Mock( return_value=self.fake_volumes) self.m_cinder.get_volume_snapshots_list = mock.Mock( return_value=self.fake_snap) self.m_cinder.get_volume_type_list = mock.Mock( return_value=self.fake_types) model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.strategy = strategies.StorageCapacityBalance( config=mock.Mock(), osc=osc) self.strategy._cinder = self.m_cinder self.strategy.input_parameters = utils.Struct() 
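        # volume_threshold is given as a percentage of pool capacity; the
        # tests also pass fractional ratios (e.g. 0.50) directly into
        # group_pools(), and test_execute overrides the percentage form via
        # input_parameters.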
self.strategy.input_parameters.update( {'volume_threshold': 80.0}) self.strategy.volume_threshold = 80.0 def test_get_pools(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) self.assertEqual(len(pools), 2) def test_get_volumes(self): volumes = self.strategy.get_volumes(self.m_cinder) self.assertEqual(len(volumes), 3) def test_group_pools(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) over_pools, under_pools = self.strategy.group_pools(pools, 0.50) self.assertEqual(len(under_pools), 1) self.assertEqual(len(over_pools), 1) over_pools, under_pools = self.strategy.group_pools(pools, 0.85) self.assertEqual(len(under_pools), 2) self.assertEqual(len(over_pools), 0) over_pools, under_pools = self.strategy.group_pools(pools, 0.30) self.assertEqual(len(under_pools), 0) self.assertEqual(len(over_pools), 2) def test_get_volume_type_by_name(self): vol_type = self.strategy.get_volume_type_by_name( self.m_cinder, 'pool1') self.assertEqual(len(vol_type), 1) vol_type = self.strategy.get_volume_type_by_name( self.m_cinder, 'ks3200') self.assertEqual(len(vol_type), 0) def test_check_pool_type(self): pool_type = self.strategy.check_pool_type( self.fake_vol3, self.fake_pool1) self.assertIsNotNone(pool_type) pool_type = self.strategy.check_pool_type( self.fake_vol3, self.fake_pool2) self.assertIsNone(pool_type) def test_migrate_fit(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) self.strategy.source_pools, self.strategy.dest_pools = ( self.strategy.group_pools(pools, 0.60)) target_pool = self.strategy.migrate_fit(self.fake_vol2, 0.60) self.assertIsNotNone(target_pool) target_pool = self.strategy.migrate_fit(self.fake_vol3, 0.50) self.assertIsNone(target_pool) target_pool = self.strategy.migrate_fit(self.fake_vol5, 0.60) self.assertIsNone(target_pool) def test_retype_fit(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) self.strategy.source_pools, self.strategy.dest_pools = ( self.strategy.group_pools(pools, 0.50)) target_pool = self.strategy.retype_fit(self.fake_vol1, 0.50) self.assertIsNotNone(target_pool) target_pool = self.strategy.retype_fit(self.fake_vol2, 0.50) self.assertIsNone(target_pool) target_pool = self.strategy.retype_fit(self.fake_vol3, 0.50) self.assertIsNotNone(target_pool) target_pool = self.strategy.retype_fit(self.fake_vol5, 0.60) self.assertIsNone(target_pool) def test_execute(self): self.strategy.input_parameters.update( {'volume_threshold': 45.0}) self.strategy.config.ex_pools = "local_vstorage" solution = self.strategy.execute() self.assertEqual(len(solution.actions), 1) setattr(self.fake_pool1, 'free_capacity_gb', '60') self.strategy.input_parameters.update( {'volume_threshold': 50.0}) solution = self.strategy.execute() self.assertEqual(len(solution.actions), 2) setattr(self.fake_pool1, 'free_capacity_gb', '60') self.strategy.input_parameters.update( {'volume_threshold': 60.0}) solution = self.strategy.execute() self.assertEqual(len(solution.actions), 3) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_strategy_endpoint.py0000664000175000017500000000550600000000000033773 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not 
use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.decision_engine.strategy.strategies import base as strategy_base from watcher.tests import base class TestStrategyEndpoint(base.BaseTestCase): def test_collect_metrics(self): datasource = mock.MagicMock() datasource.list_metrics.return_value = ["m1", "m2"] datasource.METRIC_MAP = {"metric1": "m1", "metric2": "m2", "metric3": "m3"} strategy = mock.MagicMock() strategy.DATASOURCE_METRICS = ["metric1", "metric2", "metric3"] strategy.config.datasource = "gnocchi" se = strategy_base.StrategyEndpoint(mock.MagicMock()) result = se._collect_metrics(strategy, datasource) expected_result = {'type': 'Metrics', 'state': [{"m1": "available"}, {"m2": "available"}, {"m3": "not available"}], 'mandatory': False, 'comment': ''} self.assertEqual(expected_result, result) def test_get_datasource_status(self): strategy = mock.MagicMock() datasource = mock.MagicMock() datasource.NAME = 'gnocchi' datasource.check_availability.return_value = "available" se = strategy_base.StrategyEndpoint(mock.MagicMock()) result = se._get_datasource_status(strategy, datasource) expected_result = {'type': 'Datasource', 'state': "gnocchi: available", 'mandatory': True, 'comment': ''} self.assertEqual(expected_result, result) def test_get_cdm(self): strategy = mock.MagicMock() strategy.compute_model = mock.MagicMock() del strategy.storage_model strategy.baremetal_model = mock.MagicMock() se = strategy_base.StrategyEndpoint(mock.MagicMock()) result = se._get_cdm(strategy) expected_result = {'type': 'CDM', 'state': [{"compute_model": "available"}, {"storage_model": "not available"}, {"baremetal_model": "available"}], 'mandatory': True, 'comment': ''} self.assertEqual(expected_result, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py0000664000175000017500000001614100000000000033430 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
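# UniformAirflow migrates instances away from nodes whose measured airflow
# exceeds threshold_airflow; the tests patch statistic_aggregation with
# canned airflow, inlet-temperature and power readings and assert which
# instances are chosen and where they land.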
# import collections from unittest import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestUniformAirflow(TestBaseStrategy): scenarios = [ ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestUniformAirflow, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.UniformAirflow, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics, NAME=self.fake_metrics.NAME) self.strategy = strategies.UniformAirflow( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'threshold_airflow': 400.0, 'threshold_inlet_t': 28.0, 'threshold_power': 350.0, 'period': 300}) self.strategy.threshold_airflow = 400 self.strategy.threshold_inlet_t = 28 self.strategy.threshold_power = 350 self._period = 300 self.strategy.pre_execute() def test_calc_used_resource(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model node = model.get_node_by_uuid('Node_0') cores_used, mem_used, disk_used = ( self.strategy.calculate_used_resource(node)) self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40)) def test_group_hosts_by_airflow(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 n1, n2 = self.strategy.group_hosts_by_airflow() # print n1, n2, avg, w_map self.assertEqual(n1[0]['node'].uuid, 'Node_0') self.assertEqual(n2[0]['node'].uuid, 'Node_1') def test_choose_instance_to_migrate(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 22 n1, n2 = self.strategy.group_hosts_by_airflow() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) self.assertEqual(instance_to_mig[0].uuid, 'Node_0') self.assertEqual(len(instance_to_mig[1]), 1) self.assertIn(instance_to_mig[1][0].uuid, {'cae81432-1631-4d4e-b29c-6f3acdcde906', '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) def test_choose_instance_to_migrate_all(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 25 n1, n2 = self.strategy.group_hosts_by_airflow() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) self.assertEqual(instance_to_mig[0].uuid, 'Node_0') self.assertEqual(len(instance_to_mig[1]), 2) self.assertEqual({'cae81432-1631-4d4e-b29c-6f3acdcde906', '73b09e16-35b7-4922-804e-e8f5d9b740fc'}, {inst.uuid for inst in instance_to_mig[1]}) def test_choose_instance_notfound(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 22 n1, n2 = self.strategy.group_hosts_by_airflow() instances = model.get_all_instances() [model.remove_instance(inst) for inst in instances.values()] instance_to_mig = 
self.strategy.choose_instance_to_migrate(n1) self.assertIsNone(instance_to_mig) def test_filter_destination_hosts(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 22 n1, n2 = self.strategy.group_hosts_by_airflow() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) dest_hosts = self.strategy.filter_destination_hosts( n2, instance_to_mig[1]) self.assertEqual(len(dest_hosts), 1) self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1') self.assertIn(instance_to_mig[1][0].uuid, {'cae81432-1631-4d4e-b29c-6f3acdcde906', '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) def test_execute_no_workload(self): self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 25 self.strategy.threshold_power = 300 model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 25 self.strategy.threshold_power = 300 model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(num_migrations, 2) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidati0000664000175000017500000004540300000000000034516 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Authors: Vojtech CIMA # Bruno GRAZIOLI # Sean MURPHY # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
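# VMWorkloadConsolidation works in two phases (offload, then consolidation),
# and the tests below cover both plus the bookkeeping around planned
# migrations: utilization lookups combine mocked per-instance CPU/RAM/disk
# usage with host-level metrics from the same fake datasource.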
# from unittest import mock from watcher.decision_engine.model import element from watcher.decision_engine.solution.base import BaseSolution from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import faker_cluster_and_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestVMWorkloadConsolidation(TestBaseStrategy): scenarios = [ ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": faker_cluster_and_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestVMWorkloadConsolidation, self).setUp() # fake cluster self.fake_c_cluster = faker_cluster_and_metrics.FakerModelCollector() p_datasource = mock.patch.object( strategies.VMWorkloadConsolidation, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) # fake metrics self.fake_metrics = self.fake_datasource_cls( self.m_c_model.return_value) self.m_datasource.return_value = mock.Mock( get_instance_cpu_usage=( self.fake_metrics.get_instance_cpu_util), get_instance_ram_usage=( self.fake_metrics.get_instance_ram_util), get_instance_root_disk_size=( self.fake_metrics.get_instance_disk_root_size), get_host_cpu_usage=( self.fake_metrics.get_compute_node_cpu_util), get_host_ram_usage=( self.fake_metrics.get_compute_node_ram_util) ) self.strategy = strategies.VMWorkloadConsolidation( config=mock.Mock(datasources=self.datasource)) def test_get_instance_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_util = dict(cpu=1.0, ram=1, disk=10) self.assertEqual( instance_util, self.strategy.get_instance_utilization(instance_0)) def test_get_node_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node_0 = model.get_node_by_uuid("Node_0") node_util = dict(cpu=1.0, ram=1, disk=10) self.assertEqual( node_util, self.strategy.get_node_utilization(node_0)) def test_get_node_utilization_using_host_metrics(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node_0 = model.get_node_by_uuid("Node_0") # "get_node_utilization" is expected to return the maximum # between the host metrics and the sum of the instance metrics. 
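        # Host CPU usage is reported as a percentage, so the expectation
        # scales it by the node's vcpus (cpu_usage * vcpus / 100); host RAM
        # usage is reported in KiB, hence the 512 * 1024 mock value maps to
        # the 512 MiB expected below.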
data_src = self.m_datasource.return_value cpu_usage = 30 data_src.get_host_cpu_usage = mock.Mock(return_value=cpu_usage) data_src.get_host_ram_usage = mock.Mock(return_value=512 * 1024) exp_cpu_usage = cpu_usage * node_0.vcpus / 100 exp_node_util = dict(cpu=exp_cpu_usage, ram=512, disk=10) self.assertEqual( exp_node_util, self.strategy.get_node_utilization(node_0)) def test_get_node_utilization_after_migrations(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node_0 = model.get_node_by_uuid("Node_0") node_1 = model.get_node_by_uuid("Node_1") data_src = self.m_datasource.return_value cpu_usage = 30 host_ram_usage_mb = 512 data_src.get_host_cpu_usage = mock.Mock(return_value=cpu_usage) data_src.get_host_ram_usage = mock.Mock( return_value=host_ram_usage_mb * 1024) instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) self.strategy.add_migration(instance, node_0, node_1) instance_util = self.strategy.get_instance_utilization(instance) # Ensure that we take into account planned migrations when # determining node utilization exp_node_0_cpu_usage = ( cpu_usage * node_0.vcpus) / 100 - instance_util['cpu'] exp_node_1_cpu_usage = ( cpu_usage * node_1.vcpus) / 100 + instance_util['cpu'] exp_node_0_ram_usage = host_ram_usage_mb - instance.memory exp_node_1_ram_usage = host_ram_usage_mb + instance.memory exp_node_0_util = dict( cpu=exp_node_0_cpu_usage, ram=exp_node_0_ram_usage, disk=0) exp_node_1_util = dict( cpu=exp_node_1_cpu_usage, ram=exp_node_1_ram_usage, disk=25) self.assertEqual( exp_node_0_util, self.strategy.get_node_utilization(node_0)) self.assertEqual( exp_node_1_util, self.strategy.get_node_utilization(node_1)) def test_get_node_capacity(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node_0 = model.get_node_by_uuid("Node_0") node_util = dict(cpu=40, ram=64, disk=250) self.assertEqual(node_util, self.strategy.get_node_capacity(node_0)) def test_get_relative_node_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node = model.get_node_by_uuid('Node_0') rhu = self.strategy.get_relative_node_utilization(node) expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025} self.assertEqual(expected_rhu, rhu) def test_get_relative_cluster_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model cru = self.strategy.get_relative_cluster_utilization() expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375} self.assertEqual(expected_cru, cru) def _test_add_migration(self, instance_state, expect_migration=True, expected_migration_type="live"): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) instance.state = instance_state self.strategy.add_migration(instance, n1, n2) if expect_migration: self.assertEqual(1, len(self.strategy.solution.actions)) expected = {'action_type': 'migrate', 'input_parameters': { 'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': expected_migration_type, 'resource_id': instance.uuid, 'resource_name': instance.name}} self.assertEqual(expected, self.strategy.solution.actions[0]) 
else: self.assertEqual(0, len(self.strategy.solution.actions)) def test_add_migration_with_active_state(self): self._test_add_migration(element.InstanceState.ACTIVE.value) def test_add_migration_with_paused_state(self): self._test_add_migration(element.InstanceState.PAUSED.value) def test_add_migration_with_error_state(self): self._test_add_migration(element.InstanceState.ERROR.value, expect_migration=False) def test_add_migration_with_stopped_state(self): self._test_add_migration(element.InstanceState.STOPPED.value, expected_migration_type="cold") def test_is_overloaded(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} res = self.strategy.is_overloaded(n1, cc) self.assertFalse(res) cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} res = self.strategy.is_overloaded(n1, cc) self.assertFalse(res) cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0} res = self.strategy.is_overloaded(n1, cc) self.assertTrue(res) def test_instance_fits(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n = model.get_node_by_uuid('Node_1') instance0 = model.get_instance_by_uuid('INSTANCE_0') cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} res = self.strategy.instance_fits(instance0, n, cc) self.assertTrue(res) cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} res = self.strategy.instance_fits(instance0, n, cc) self.assertFalse(res) def test_add_action_enable_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n = model.get_node_by_uuid('Node_0') self.strategy.add_action_enable_compute_node(n) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': {'state': 'enabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_action_disable_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n = model.get_node_by_uuid('Node_0') self.strategy.add_action_disable_node(n) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_disable_unused_nodes(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) self.strategy.disable_unused_nodes() self.assertEqual(0, len(self.strategy.solution.actions)) # Migrate VM to free the node self.strategy.add_migration(instance, n1, n2) self.strategy.disable_unused_nodes() expected = {'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}} self.assertEqual(2, len(self.strategy.solution.actions)) self.assertEqual(expected, self.strategy.solution.actions[1]) def test_offload_phase(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} self.strategy.offload_phase(cc) 
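        # With relaxed coefficients (all 1.0) no host counts as overloaded,
        # so the offload phase should propose no actions.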
expected = [] self.assertEqual(expected, self.strategy.solution.actions) def test_consolidation_phase(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} self.strategy.consolidation_phase(cc) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': instance.uuid, 'resource_name': instance.name}}] self.assertEqual(expected, self.strategy.solution.actions) def test_strategy(self): model = self.fake_c_cluster.generate_scenario_2() self.m_c_model.return_value = model self.fake_metrics.model = model result = self.strategy.pre_execute() self.assertIsNone(result) n1 = model.get_node_by_uuid('Node_0') self.strategy.get_relative_cluster_utilization = mock.MagicMock() self.strategy.do_execute() n2_name = self.strategy.solution.actions[0][ 'input_parameters']['destination_node'] n2 = model.get_node_by_name(n2_name) n3_uuid = self.strategy.solution.actions[2][ 'input_parameters']['resource_id'] n3 = model.get_node_by_uuid(n3_uuid) n4_uuid = self.strategy.solution.actions[3][ 'input_parameters']['resource_id'] n4 = model.get_node_by_uuid(n4_uuid) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_3', 'resource_name': ''}}, {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_1', 'resource_name': ''}}, {'action_type': 'change_nova_service_state', 'input_parameters': {'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': n3.uuid, 'resource_name': n3.hostname}}, {'action_type': 'change_nova_service_state', 'input_parameters': {'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': n4.uuid, 'resource_name': n4.hostname}}] self.assertEqual(expected, self.strategy.solution.actions) compute_nodes_count = len(self.strategy.get_available_compute_nodes()) number_of_released_nodes = self.strategy.number_of_released_nodes number_of_migrations = self.strategy.number_of_migrations with mock.patch.object( BaseSolution, 'set_efficacy_indicators' ) as mock_set_efficacy_indicators: result = self.strategy.post_execute() mock_set_efficacy_indicators.assert_called_once_with( compute_nodes_count=compute_nodes_count, released_compute_nodes_count=number_of_released_nodes, instance_migrations_count=number_of_migrations ) def test_strategy2(self): model = self.fake_c_cluster.generate_scenario_3() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} self.strategy.offload_phase(cc) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_6', 'resource_name': '', 'source_node': n1.hostname}}, {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_7', 'resource_name': '', 'source_node': n1.hostname}}, {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'migration_type': 
'live', 'resource_id': 'INSTANCE_8', 'resource_name': '', 'source_node': n1.hostname}}] self.assertEqual(expected, self.strategy.solution.actions) self.strategy.consolidation_phase(cc) expected.append({'action_type': 'migrate', 'input_parameters': {'destination_node': n1.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_7', 'resource_name': '', 'source_node': n2.hostname}}) self.assertEqual(expected, self.strategy.solution.actions) self.strategy.optimize_solution() del expected[3] del expected[1] self.assertEqual(expected, self.strategy.solution.actions) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py0000664000175000017500000001367200000000000033523 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections from unittest import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestWorkloadBalance(TestBaseStrategy): scenarios = [ ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestWorkloadBalance, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.WorkloadBalance, "datasource_backend", new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) self.strategy = strategies.WorkloadBalance( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'metrics': 'instance_cpu_usage', 'threshold': 25.0, 'period': 300, 'granularity': 300}) self.strategy.threshold = 25.0 self.strategy._period = 300 self.strategy._meter = 'instance_cpu_usage' self.strategy._granularity = 300 def test_group_hosts_by_cpu_util(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold = 30 n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() self.assertEqual(n1[0]['compute_node'].uuid, 'Node_0') self.assertEqual(n2[0]['compute_node'].uuid, 'Node_1') self.assertEqual(avg, 8.0) def test_group_hosts_by_ram_util(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model self.strategy._meter = 'instance_ram_usage' self.strategy.threshold = 30 n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() self.assertEqual(n1[0]['compute_node'].uuid, 'Node_0') self.assertEqual(n2[0]['compute_node'].uuid, 
'Node_1') self.assertEqual(avg, 33.0) def test_choose_instance_to_migrate(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() instance_to_mig = self.strategy.choose_instance_to_migrate( n1, avg, w_map) self.assertEqual(instance_to_mig[0].uuid, 'Node_0') self.assertEqual(instance_to_mig[1].uuid, "73b09e16-35b7-4922-804e-e8f5d9b740fc") def test_choose_instance_notfound(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() instances = model.get_all_instances() [model.remove_instance(inst) for inst in instances.values()] instance_to_mig = self.strategy.choose_instance_to_migrate( n1, avg, w_map) self.assertIsNone(instance_to_mig) def test_filter_destination_hosts(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model self.strategy.datasource = mock.MagicMock( statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() instance_to_mig = self.strategy.choose_instance_to_migrate( n1, avg, w_map) dest_hosts = self.strategy.filter_destination_hosts( n2, instance_to_mig[1], avg, w_map) self.assertEqual(len(dest_hosts), 1) self.assertEqual(dest_hosts[0]['compute_node'].uuid, 'Node_1') def test_execute_no_workload(self): model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(num_migrations, 1) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py 22 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.0000664000175000017500000002703200000000000034434 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica LLC # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
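# WorkloadStabilization minimizes the weighted standard deviation of host
# loads: the tests below exercise the load collection, normalization and
# migration-simulation helpers, as well as the deprecated "node"
# period/aggregation parameters that pre_execute() maps onto "compute_node".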
# from unittest import mock from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestWorkloadStabilization(TestBaseStrategy): scenarios = [ ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestWorkloadStabilization, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() self.hosts_load_assert = { 'Node_0': {'instance_cpu_usage': 0.07, 'instance_ram_usage': 7.0, 'vcpus': 40}, 'Node_1': {'instance_cpu_usage': 0.07, 'instance_ram_usage': 5, 'vcpus': 40}, 'Node_2': {'instance_cpu_usage': 0.8, 'instance_ram_usage': 29, 'vcpus': 40}, 'Node_3': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 8, 'vcpus': 40}, 'Node_4': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 4, 'vcpus': 40}} p_osc = mock.patch.object( clients, "OpenStackClients") self.m_osc = p_osc.start() self.addCleanup(p_osc.stop) p_datasource = mock.patch.object( strategies.WorkloadStabilization, "datasource_backend", new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics) self.strategy = strategies.WorkloadStabilization( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update( {'metrics': ["instance_cpu_usage", "instance_ram_usage"], 'thresholds': {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2}, 'weights': {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0}, 'instance_metrics': {"instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"}, 'host_choice': 'retry', 'retry_count': 1, 'periods': { "instance": 720, "compute_node": 600, "node": 0}, 'aggregation_method': { "instance": "mean", "compute_node": "mean", "node": ''}}) self.strategy.metrics = ["instance_cpu_usage", "instance_ram_usage"] self.strategy.thresholds = {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2} self.strategy.weights = {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0} self.strategy.instance_metrics = { "instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"} self.strategy.host_choice = 'retry' self.strategy.retry_count = 1 self.strategy.periods = { "instance": 720, "compute_node": 600, # node is deprecated "node": 0, } self.strategy.aggregation_method = { "instance": "mean", "compute_node": "mean", # node is deprecated "node": '', } def test_get_instance_load(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance0 = model.get_instance_by_uuid("INSTANCE_0") instance_0_dict = { 'uuid': 'INSTANCE_0', 'vcpus': 10, 'instance_cpu_usage': 0.07, 'instance_ram_usage': 2} self.assertEqual( instance_0_dict, self.strategy.get_instance_load(instance0)) def test_get_instance_load_with_no_metrics(self): model = self.fake_c_cluster.\ generate_scenario_1_with_1_node_unavailable() self.m_c_model.return_value = model lost_instance = model.get_instance_by_uuid("LOST_INSTANCE") self.assertIsNone(self.strategy.get_instance_load(lost_instance)) def test_normalize_hosts_load(self): self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() fake_hosts = {'Node_0': 
{'instance_cpu_usage': 0.07, 'instance_ram_usage': 7},
            'Node_1': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 5}}
        normalized_hosts = {
            'Node_0': {'instance_cpu_usage': 0.07,
                       'instance_ram_usage': 0.05303030303030303},
            'Node_1': {'instance_cpu_usage': 0.05,
                       'instance_ram_usage': 0.03787878787878788}}
        self.assertEqual(
            normalized_hosts,
            self.strategy.normalize_hosts_load(fake_hosts))

    def test_get_available_nodes(self):
        self.m_c_model.return_value = self.fake_c_cluster. \
            generate_scenario_9_with_3_active_plus_1_disabled_nodes()
        self.assertEqual(3, len(self.strategy.get_available_nodes()))

    def test_get_hosts_load(self):
        self.m_c_model.return_value = self.fake_c_cluster.\
            generate_scenario_1()
        self.assertEqual(self.strategy.get_hosts_load(),
                         self.hosts_load_assert)

    def test_get_hosts_load_with_node_missing(self):
        self.m_c_model.return_value = \
            self.fake_c_cluster.\
            generate_scenario_1_with_1_node_unavailable()
        self.assertEqual(self.hosts_load_assert,
                         self.strategy.get_hosts_load())

    def test_get_sd(self):
        test_cpu_sd = 0.296
        test_ram_sd = 9.3
        self.assertEqual(
            round(self.strategy.get_sd(
                self.hosts_load_assert, 'instance_cpu_usage'), 3),
            test_cpu_sd)
        self.assertEqual(
            round(self.strategy.get_sd(
                self.hosts_load_assert, 'instance_ram_usage'), 1),
            test_ram_sd)

    def test_calculate_weighted_sd(self):
        sd_case = [0.5, 0.75]
        self.assertEqual(self.strategy.calculate_weighted_sd(sd_case), 1.25)

    def test_calculate_migration_case(self):
        model = self.fake_c_cluster.generate_scenario_1()
        self.m_c_model.return_value = model
        instance = model.get_instance_by_uuid("INSTANCE_5")
        src_node = model.get_node_by_uuid("Node_2")
        dst_node = model.get_node_by_uuid("Node_1")
        result = self.strategy.calculate_migration_case(
            self.hosts_load_assert, instance,
            src_node, dst_node)[-1][dst_node.uuid]
        result['instance_cpu_usage'] = round(result['instance_cpu_usage'], 3)
        self.assertEqual(result, {'instance_cpu_usage': 0.095,
                                  'instance_ram_usage': 21.0,
                                  'vcpus': 40})

    def test_simulate_migrations(self):
        model = self.fake_c_cluster.generate_scenario_1()
        self.m_c_model.return_value = model
        self.strategy.host_choice = 'fullsearch'
        self.assertEqual(
            10,
            len(self.strategy.simulate_migrations(self.hosts_load_assert)))

    def test_simulate_migrations_with_all_instances_exclude(self):
        model = \
            self.fake_c_cluster.\
            generate_scenario_1_with_all_instances_exclude()
        self.m_c_model.return_value = model
        self.strategy.host_choice = 'fullsearch'
        self.assertEqual(
            0,
            len(self.strategy.simulate_migrations(self.hosts_load_assert)))

    def test_check_threshold(self):
        self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
        self.strategy.thresholds = {'instance_cpu_usage': 0.001,
                                    'instance_ram_usage': 0.2}
        self.strategy.simulate_migrations = mock.Mock(return_value=True)
        self.assertTrue(self.strategy.check_threshold())

    def test_execute_one_migration(self):
        self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
        self.strategy.thresholds = {'instance_cpu_usage': 0.001,
                                    'instance_ram_usage': 0.2}
        self.strategy.simulate_migrations = mock.Mock(
            return_value=[
                {'instance': 'INSTANCE_4', 's_host': 'Node_2',
                 'host': 'Node_1'}]
        )
        with mock.patch.object(self.strategy, 'migrate') as mock_migration:
            self.strategy.do_execute()
            mock_migration.assert_called_once_with(
                'INSTANCE_4', 'Node_2', 'Node_1')

    def test_execute_multiple_migrations(self):
        self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
        self.strategy.thresholds = {'instance_cpu_usage': 0.00001,
                                    'instance_ram_usage': 0.0001}
        self.strategy.simulate_migrations = mock.Mock(
            return_value=[
                {'instance': 'INSTANCE_4', 's_host': 'Node_2',
                 'host': 'Node_1'},
                {'instance': 'INSTANCE_3', 's_host': 'Node_2',
                 'host': 'Node_3'}]
        )
        with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
            self.strategy.do_execute()
            self.assertEqual(mock_migrate.call_count, 2)

    def test_execute_nothing_to_migrate(self):
        self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
        self.strategy.thresholds = {'instance_cpu_usage': 0.042,
                                    'instance_ram_usage': 0.0001}
        self.strategy.simulate_migrations = mock.Mock(return_value=False)
        self.strategy.instance_migrations_count = 0
        with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
            self.strategy.execute()
            mock_migrate.assert_not_called()

    def test_parameter_backwards_compat(self):
        # Set the deprecated 'node' values to a non-default value
        self.strategy.input_parameters.update(
            {'periods': {
                "instance": 720,
                "compute_node": 600,
                "node": 500
            },
             'aggregation_method': {
                 "instance": "mean",
                 "compute_node": "mean",
                 "node": 'min'}})
        # The pre_execute method handles backwards compatibility of
        # parameters.
        self.strategy.pre_execute()
        # Assert that the compute_node values are updated to those of 'node'
        self.assertEqual(
            'min', self.strategy.aggregation_method['compute_node'])
        self.assertEqual(
            500, self.strategy.periods['compute_node'])
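
# A minimal sketch (hypothetical, not the strategy's actual code) of the
# deprecation handling exercised by test_parameter_backwards_compat above:
# a non-default value supplied under the deprecated 'node' key is copied
# onto the 'compute_node' key during pre_execute(). The helper name and the
# default values below are illustrative assumptions.
def remap_deprecated_node_parameter(periods, aggregation_method,
                                    default_period=300,
                                    default_aggregation='mean'):
    """Copy deprecated 'node' settings onto 'compute_node'."""
    if periods.get('node', default_period) != default_period:
        periods['compute_node'] = periods['node']
    if aggregation_method.get(
            'node', default_aggregation) != default_aggregation:
        aggregation_method['compute_node'] = aggregation_method['node']
    return periods, aggregation_method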
python_watcher-14.0.0/watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
from unittest import mock

import cinderclient
import novaclient

from watcher.common import cinder_helper
from watcher.common import clients
from watcher.common import nova_helper
from watcher.common import utils
from watcher.decision_engine.strategy import strategies
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.strategy.strategies.test_base \
    import TestBaseStrategy

volume_uuid_mapping = faker_cluster_state.volume_uuid_mapping


class TestZoneMigration(TestBaseStrategy):

    def setUp(self):
        super(TestZoneMigration, self).setUp()

        # fake storage cluster
        self.fake_s_cluster = faker_cluster_state.FakerStorageModelCollector()

        p_s_model = mock.patch.object(
            strategies.ZoneMigration, "storage_model",
            new_callable=mock.PropertyMock)
        self.m_s_model = p_s_model.start()
        self.addCleanup(p_s_model.stop)

        p_migrate_compute_nodes = mock.patch.object(
            strategies.ZoneMigration, "migrate_compute_nodes",
            new_callable=mock.PropertyMock)
        self.m_migrate_compute_nodes = p_migrate_compute_nodes.start()
        self.addCleanup(p_migrate_compute_nodes.stop)

        p_migrate_storage_pools = mock.patch.object(
            strategies.ZoneMigration, "migrate_storage_pools",
            new_callable=mock.PropertyMock)
        self.m_migrate_storage_pools = p_migrate_storage_pools.start()
        self.addCleanup(p_migrate_storage_pools.stop)

        p_parallel_total = mock.patch.object(
            strategies.ZoneMigration, "parallel_total",
            new_callable=mock.PropertyMock)
        self.m_parallel_total = p_parallel_total.start()
        self.addCleanup(p_parallel_total.stop)

        p_parallel_per_node = mock.patch.object(
            strategies.ZoneMigration, "parallel_per_node",
            new_callable=mock.PropertyMock)
        self.m_parallel_per_node = p_parallel_per_node.start()
        self.addCleanup(p_parallel_per_node.stop)

        p_parallel_per_pool = mock.patch.object(
            strategies.ZoneMigration, "parallel_per_pool",
            new_callable=mock.PropertyMock)
        self.m_parallel_per_pool = p_parallel_per_pool.start()
        self.addCleanup(p_parallel_per_pool.stop)

        p_priority = mock.patch.object(
            strategies.ZoneMigration, "priority",
            new_callable=mock.PropertyMock
        )
        self.m_priority = p_priority.start()
        self.addCleanup(p_priority.stop)

        model = self.fake_c_cluster.generate_scenario_1()
        self.m_c_model.return_value = model

        model = self.fake_s_cluster.generate_scenario_1()
        self.m_s_model.return_value = model

        self.m_parallel_total.return_value = 6
        self.m_parallel_per_node.return_value = 2
        self.m_parallel_per_pool.return_value = 2
        self.m_audit_scope.return_value = mock.Mock()
        self.m_migrate_compute_nodes.return_value = [
            {"src_node": "src1", "dst_node": "dst1"},
            {"src_node": "src2", "dst_node": "dst2"}
        ]
        self.m_migrate_storage_pools.return_value = [
            {"src_pool": "src1@back1#pool1", "dst_pool": "dst1@back1#pool1",
             "src_type": "type1", "dst_type": "type1"},
            {"src_pool": "src2@back1#pool1", "dst_pool": "dst2@back2#pool1",
             "src_type": "type2", "dst_type": "type3"}
        ]
        self.strategy = strategies.ZoneMigration(
            config=mock.Mock())

        self.m_osc_cls = mock.Mock()
        self.m_osc = mock.Mock(spec=clients.OpenStackClients)
        self.m_osc_cls.return_value = self.m_osc
        m_openstack_clients = mock.patch.object(
            clients, "OpenStackClients", self.m_osc_cls)
        m_openstack_clients.start()
        self.addCleanup(m_openstack_clients.stop)

        self.m_n_helper_cls = mock.Mock()
        self.m_n_helper = mock.Mock(spec=nova_helper.NovaHelper)
        self.m_n_helper_cls.return_value = self.m_n_helper
        m_nova_helper = mock.patch.object(
            nova_helper, "NovaHelper", self.m_n_helper_cls)
        m_nova_helper.start()
        self.addCleanup(m_nova_helper.stop)

        self.m_c_helper_cls = mock.Mock()
        self.m_c_helper = mock.Mock(spec=cinder_helper.CinderHelper)
        self.m_c_helper_cls.return_value = self.m_c_helper
        m_cinder_helper = mock.patch.object(
            cinder_helper, "CinderHelper", self.m_c_helper_cls)
        m_cinder_helper.start()
        self.addCleanup(m_cinder_helper.stop)

    @staticmethod
    def fake_instance(**kwargs):
        instance = mock.MagicMock(spec=novaclient.v2.servers.Server)
        instance.id = kwargs.get('id', utils.generate_uuid())
        instance.name = kwargs.get('name', 'fake_name')
        instance.status = kwargs.get('status', 'ACTIVE')
        instance.tenant_id = kwargs.get('project_id', None)
        instance.flavor = {'id': kwargs.get('flavor_id', None)}
        setattr(instance, 'OS-EXT-SRV-ATTR:host', kwargs.get('host'))
        setattr(instance, 'created_at',
                kwargs.get('created_at', '1977-01-01T00:00:00'))
        setattr(instance, 'OS-EXT-STS:vm_state',
                kwargs.get('state', 'active'))
        return instance

    @staticmethod
    def fake_volume(**kwargs):
        volume = mock.MagicMock(spec=cinderclient.v3.volumes.Volume)
        volume.id = kwargs.get('id', utils.generate_uuid())
        volume.name = kwargs.get('name', 'fake_name')
        volume.status = kwargs.get('status', 'available')
        tenant_id = kwargs.get('project_id', None)
        setattr(volume, 'os-vol-tenant-attr:tenant_id', tenant_id)
        setattr(volume, 'os-vol-host-attr:host', kwargs.get('host'))
        setattr(volume, 'size', kwargs.get('size', '1'))
        setattr(volume, 'created_at',
                kwargs.get('created_at', '1977-01-01T00:00:00'))
        volume.volume_type = kwargs.get('volume_type', 'type1')
        return volume

    @staticmethod
    def fake_flavor(**kwargs):
        flavor = mock.MagicMock()
        flavor.id = kwargs.get('id', None)
        flavor.ram = kwargs.get('mem_size', '1')
        flavor.vcpus = kwargs.get('vcpu_num', '1')
        flavor.disk = kwargs.get('disk_size', '1')
        return flavor

    def test_get_src_node_list(self):
        instances = self.strategy.get_src_node_list()
        self.assertEqual(sorted(instances), sorted(["src1", "src2"]))

    def test_get_instances(self):
        instance_on_src1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1")
        instance_on_src2 = self.fake_instance(
            host="src2", id="INSTANCE_2", name="INSTANCE_2")
        instance_on_src3 = self.fake_instance(
            host="src3", id="INSTANCE_3", name="INSTANCE_3")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]

        instances = self.strategy.get_instances()

        # The instances on src1 and src2 are returned; the one on src3 is not
        self.assertIn(instance_on_src1, instances)
        self.assertIn(instance_on_src2, instances)
        self.assertNotIn(instance_on_src3, instances)

    def test_get_volumes(self):
        volume_on_src1 = self.fake_volume(host="src1@back1#pool1",
                                          id=volume_uuid_mapping["volume_1"],
                                          name="volume_1")
        volume_on_src2 = self.fake_volume(host="src2@back1#pool1",
                                          id=volume_uuid_mapping["volume_2"],
                                          name="volume_2")
        volume_on_src3 = self.fake_volume(host="src3@back2#pool1",
                                          id=volume_uuid_mapping["volume_3"],
                                          name="volume_3")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
            volume_on_src2,
            volume_on_src3,
        ]

        volumes = self.strategy.get_volumes()

        # The volumes on src1 and src2 are returned; the one on src3 is not
        self.assertIn(volume_on_src1, volumes)
        self.assertIn(volume_on_src2, volumes)
        self.assertNotIn(volume_on_src3, volumes)

    # execute #

    def test_execute_live_migrate_instance(self):
        instance_on_src1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
        ]
        self.m_c_helper.get_volume_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("live", 0))
        global_efficacy_value = solution.global_efficacy[0].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_cold_migrate_instance(self):
        instance_on_src1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1")
        setattr(instance_on_src1, "status", "SHUTOFF")
        setattr(instance_on_src1, "OS-EXT-STS:vm_state", "stopped")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
        ]
        self.m_c_helper.get_volume_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("cold", 0))
        global_efficacy_value = solution.global_efficacy[1].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_migrate_volume(self):
        volume_on_src1 = self.fake_volume(host="src1@back1#pool1",
                                          id=volume_uuid_mapping["volume_1"],
                                          name="volume_1")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
        ]
        self.m_n_helper.get_instance_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("migrate", 0))
        global_efficacy_value = solution.global_efficacy[2].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_retype_volume(self):
        volume_on_src2 = self.fake_volume(host="src2@back1#pool1",
                                          id=volume_uuid_mapping["volume_2"],
                                          name="volume_2")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src2,
        ]
        self.m_n_helper.get_instance_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("retype", 0))
        global_efficacy_value = solution.global_efficacy[2].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_swap_volume(self):
        volume_on_src1 = self.fake_volume(host="src1@back1#pool1",
                                          id=volume_uuid_mapping["volume_1"],
                                          name="volume_1")
        volume_on_src1.status = "in-use"
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
        ]
        self.m_n_helper.get_instance_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("swap", 0))
        global_efficacy_value = solution.global_efficacy[3].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_live_migrate_instance_parallel(self):
        instance_on_src1_1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1")
        instance_on_src1_2 = self.fake_instance(
            host="src2", id="INSTANCE_2", name="INSTANCE_2")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1_1,
            instance_on_src1_2,
        ]
        self.m_c_helper.get_volume_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(2, migration_types.get("live", 0))
        global_efficacy_value = solution.global_efficacy[0].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_parallel_per_node(self):
        self.m_parallel_per_node.return_value = 1
        instance_on_src1_1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1")
        instance_on_src1_2 = self.fake_instance(
            host="src1", id="INSTANCE_2", name="INSTANCE_2")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1_1,
            instance_on_src1_2,
        ]
        self.m_c_helper.get_volume_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("live", 0))
        global_efficacy_value = solution.global_efficacy[0].get('value', 0)
        self.assertEqual(50.0, global_efficacy_value)

    def test_execute_migrate_volume_parallel(self):
        volume_on_src1_1 = self.fake_volume(
            host="src1@back1#pool1",
            id=volume_uuid_mapping["volume_1"],
            name="volume_1")
        volume_on_src1_2 = self.fake_volume(
            host="src1@back1#pool1",
            id=volume_uuid_mapping["volume_2"],
            name="volume_2")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1_1,
            volume_on_src1_2,
        ]
        self.m_n_helper.get_instance_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(2, migration_types.get("migrate", 0))
        global_efficacy_value = solution.global_efficacy[2].get('value', 0)
        self.assertEqual(100, global_efficacy_value)

    def test_execute_parallel_per_pool(self):
        self.m_parallel_per_pool.return_value = 1
        volume_on_src1_1 = self.fake_volume(
            host="src1@back1#pool1",
            id=volume_uuid_mapping["volume_1"],
            name="volume_1")
        volume_on_src1_2 = self.fake_volume(
            host="src1@back1#pool1",
            id=volume_uuid_mapping["volume_2"],
            name="volume_2")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1_1,
            volume_on_src1_2,
        ]
        self.m_n_helper.get_instance_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("migrate", 0))
        global_efficacy_value = solution.global_efficacy[2].get('value', 0)
        self.assertEqual(50.0, global_efficacy_value)

    def test_execute_parallel_total(self):
        self.m_parallel_total.return_value = 1
        self.m_parallel_per_pool.return_value = 1
        volume_on_src1_1 = self.fake_volume(
            host="src1@back1#pool1",
            id=volume_uuid_mapping["volume_1"],
            name="volume_1")
        volume_on_src1_2 = self.fake_volume(
            host="src1@back1#pool1",
            id=volume_uuid_mapping["volume_2"],
            name="volume_2")
        volume_on_src2_1 = self.fake_volume(
            host="src2@back1#pool1",
            id=volume_uuid_mapping["volume_3"],
            name="volume_3")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1_1,
            volume_on_src1_2,
            volume_on_src2_1,
        ]
        self.m_n_helper.get_instance_list.return_value = []

        solution = self.strategy.execute()

        migration_types = collections.Counter(
            [action.get('input_parameters')['migration_type']
             for action in solution.actions])
        self.assertEqual(1, migration_types.get("migrate", 0))

    # priority filter #

    def test_get_priority_filter_list(self):
        self.m_priority.return_value = {
            "project": ["pj1"],
            "compute_node": ["compute1", "compute2"],
            "compute": ["cpu_num"],
            "storage_pool": ["pool1", "pool2"],
            "storage": ["size"]
        }
        filters = self.strategy.get_priority_filter_list()
        self.assertIn(strategies.zone_migration.ComputeHostSortFilter,
                      map(lambda l: l.__class__, filters))  # noqa: E741
        self.assertIn(strategies.zone_migration.StorageHostSortFilter,
                      map(lambda l: l.__class__, filters))  # noqa: E741
        self.assertIn(strategies.zone_migration.ProjectSortFilter,
                      map(lambda l: l.__class__, filters))  # noqa: E741

    # ComputeHostSortFilter #

    def test_filtered_targets_compute_nodes(self):
        instance_on_src1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1")
        instance_on_src2 = self.fake_instance(
            host="src2", id="INSTANCE_2", name="INSTANCE_2")
        instance_on_src3 = self.fake_instance(
            host="src3", id="INSTANCE_3", name="INSTANCE_3")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]
        self.m_c_helper.get_volume_list.return_value = []
        self.m_priority.return_value = {
            "compute_node": ["src1", "src2"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get('instance'),
                         [instance_on_src1, instance_on_src2])

    # StorageHostSortFilter #

    def test_filtered_targets_storage_pools(self):
        volume_on_src1 = self.fake_volume(host="src1@back1#pool1",
                                          id=volume_uuid_mapping["volume_1"],
                                          name="volume_1")
        volume_on_src2 = self.fake_volume(host="src2@back1#pool1",
                                          id=volume_uuid_mapping["volume_2"],
                                          name="volume_2")
        volume_on_src3 = self.fake_volume(host="src3@back2#pool1",
                                          id=volume_uuid_mapping["volume_3"],
                                          name="volume_3")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
            volume_on_src2,
            volume_on_src3,
        ]
        self.m_n_helper.get_instance_list.return_value = []
        self.m_priority.return_value = {
            "storage_pool": ["src1@back1#pool1", "src2@back1#pool1"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get("volume"),
                         [volume_on_src1, volume_on_src2])

    # ProjectSortFilter #

    def test_filtered_targets_project(self):
        instance_on_src1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name='INSTANCE_1',
            project_id="pj2")
        instance_on_src2 = self.fake_instance(
            host="src2", id="INSTANCE_2", name='INSTANCE_2',
            project_id="pj1")
        instance_on_src3 = self.fake_instance(
            host="src3", id="INSTANCE_3", name='INSTANCE_3',
            project_id="pj3")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]
        self.m_c_helper.get_volume_list.return_value = []

        volume_on_src1 = self.fake_volume(host="src1@back1#pool1",
                                          id=volume_uuid_mapping["volume_1"],
                                          name="volume_1", project_id="pj2")
        volume_on_src2 = self.fake_volume(host="src2@back1#pool1",
                                          id=volume_uuid_mapping["volume_2"],
                                          name="volume_2", project_id="pj1")
        volume_on_src3 = self.fake_volume(host="src3@back2#pool1",
                                          id=volume_uuid_mapping["volume_3"],
                                          name="volume_3", project_id="pj3")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
            volume_on_src2,
            volume_on_src3,
        ]
        self.m_priority.return_value = {
            "project": ["pj1"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get('instance'),
                         [instance_on_src2, instance_on_src1])
        self.assertEqual(targets.get('volume'),
                         [volume_on_src2, volume_on_src1])
        self.assertEqual(targets,
                         {"instance": [instance_on_src2, instance_on_src1],
                          "volume": [volume_on_src2, volume_on_src1]})

    # ComputeSpecSortFilter #

    def test_filtered_targets_instance_mem_size(self):
        flavor_64 = self.fake_flavor(id="1", mem_size="64")
        flavor_128 = self.fake_flavor(id="2", mem_size="128")
        flavor_512 = self.fake_flavor(id="3", mem_size="512")
        self.m_n_helper.get_flavor_list.return_value = [
            flavor_64,
            flavor_128,
            flavor_512,
        ]

        instance_on_src1 = self.fake_instance(host="src1",
                                              name="INSTANCE_1",
                                              id="INSTANCE_1", flavor_id="1")
        instance_on_src2 = self.fake_instance(host="src2",
                                              name="INSTANCE_2",
                                              id="INSTANCE_2", flavor_id="2")
        instance_on_src3 = self.fake_instance(host="src3",
                                              name="INSTANCE_3",
                                              id="INSTANCE_3", flavor_id="3")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]
        self.m_c_helper.get_volume_list.return_value = []
        self.m_priority.return_value = {
            "compute": ["mem_size"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get('instance'),
                         [instance_on_src2, instance_on_src1])

    def test_filtered_targets_instance_vcpu_num(self):
        flavor_1 = self.fake_flavor(id="1", vcpu_num="1")
        flavor_2 = self.fake_flavor(id="2", vcpu_num="2")
        flavor_3 = self.fake_flavor(id="3", vcpu_num="3")
        self.m_n_helper.get_flavor_list.return_value = [
            flavor_1,
            flavor_2,
            flavor_3,
        ]

        instance_on_src1 = self.fake_instance(host="src1",
                                              name="INSTANCE_1",
                                              id="INSTANCE_1", flavor_id="1")
        instance_on_src2 = self.fake_instance(host="src2",
                                              name="INSTANCE_2",
                                              id="INSTANCE_2", flavor_id="2")
        instance_on_src3 = self.fake_instance(host="src3",
                                              name="INSTANCE_3",
                                              id="INSTANCE_3", flavor_id="3")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]
        self.m_c_helper.get_volume_list.return_value = []
        self.m_priority.return_value = {
            "compute": ["vcpu_num"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get('instance'),
                         [instance_on_src2, instance_on_src1])

    def test_filtered_targets_instance_disk_size(self):
        flavor_1 = self.fake_flavor(id="1", disk_size="1")
        flavor_2 = self.fake_flavor(id="2", disk_size="2")
        flavor_3 = self.fake_flavor(id="3", disk_size="3")
        self.m_n_helper.get_flavor_list.return_value = [
            flavor_1,
            flavor_2,
            flavor_3,
        ]

        instance_on_src1 = self.fake_instance(host="src1",
                                              name="INSTANCE_1",
                                              id="INSTANCE_1", flavor_id="1")
        instance_on_src2 = self.fake_instance(host="src2",
                                              name="INSTANCE_2",
                                              id="INSTANCE_2", flavor_id="2")
        instance_on_src3 = self.fake_instance(host="src3",
                                              name="INSTANCE_3",
                                              id="INSTANCE_3", flavor_id="3")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]
        self.m_c_helper.get_volume_list.return_value = []
        self.m_priority.return_value = {
            "compute": ["disk_size"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get('instance'),
                         [instance_on_src2, instance_on_src1])

    def test_filtered_targets_instance_created_at(self):
        instance_on_src1 = self.fake_instance(
            host="src1", id="INSTANCE_1", name="INSTANCE_1",
            created_at="2017-10-30T00:00:00")
        instance_on_src2 = self.fake_instance(
            host="src2", id="INSTANCE_2", name="INSTANCE_2",
            created_at="1977-03-29T03:03:03")
        instance_on_src3 = self.fake_instance(
            host="src3", id="INSTANCE_3", name="INSTANCE_3",
            created_at="1977-03-29T03:03:03")
        self.m_n_helper.get_instance_list.return_value = [
            instance_on_src1,
            instance_on_src2,
            instance_on_src3,
        ]
        self.m_c_helper.get_volume_list.return_value = []
        self.m_priority.return_value = {
            "compute": ["created_at"],
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get('instance'),
                         [instance_on_src2, instance_on_src1])

    # StorageSpecSortFilter #

    def test_filtered_targets_storage_size(self):
        volume_on_src1 = self.fake_volume(
            host="src1@back1#pool1", size="1",
            id=volume_uuid_mapping["volume_1"], name="volume_1")
        volume_on_src2 = self.fake_volume(
            host="src2@back1#pool1", size="2",
            id=volume_uuid_mapping["volume_2"], name="volume_2")
        volume_on_src3 = self.fake_volume(
            host="src3@back2#pool1", size="3",
            id=volume_uuid_mapping["volume_3"], name="volume_3")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
            volume_on_src2,
            volume_on_src3,
        ]
        self.m_n_helper.get_instance_list.return_value = []
        self.m_priority.return_value = {
            "storage": ["size"]
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get("volume"),
                         [volume_on_src2, volume_on_src1])

    def test_filtered_targets_storage_created_at(self):
        volume_on_src1 = self.fake_volume(host="src1@back1#pool1",
                                          id=volume_uuid_mapping["volume_1"],
                                          name="volume_1",
                                          created_at="2017-10-30T00:00:00")
        volume_on_src2 = self.fake_volume(host="src2@back1#pool1",
                                          id=volume_uuid_mapping["volume_2"],
                                          name="volume_2",
                                          created_at="1977-03-29T03:03:03")
        volume_on_src3 = self.fake_volume(host="src3@back2#pool1",
                                          id=volume_uuid_mapping["volume_3"],
                                          name="volume_3",
                                          created_at="1977-03-29T03:03:03")
        self.m_c_helper.get_volume_list.return_value = [
            volume_on_src1,
            volume_on_src2,
            volume_on_src3,
        ]
        self.m_n_helper.get_instance_list.return_value = []
        self.m_priority.return_value = {
            "storage": ["created_at"]
        }

        targets = self.strategy.filtered_targets()

        self.assertEqual(targets.get("volume"),
                         [volume_on_src2, volume_on_src1])
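
# A hypothetical sketch of the "sort filter" ordering the filtered_targets
# tests above assert on. This is not the ZoneMigration implementation; it
# only illustrates the rule: targets matching the operator-supplied
# priority list come first, in the list's own order, followed by the rest.
def sort_by_priority(targets, key, priorities):
    prioritized = [t for p in priorities for t in targets if key(t) == p]
    rest = [t for t in targets if key(t) not in priorities]
    return prioritized + rest

# Example: with priorities=["pj1"], items whose project is "pj1" are moved
# to the front while the relative order of the remaining items is kept:
# sort_by_priority(volumes, lambda v: v.tenant_id, ["pj1"])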
python_watcher-14.0.0/watcher/tests/decision_engine/test_gmr.py

# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from watcher.decision_engine import gmr
from watcher.decision_engine.model.collector import manager
from watcher.tests import base


class TestGmrPlugin(base.TestCase):

    @mock.patch.object(manager.CollectorManager, "get_collectors")
    def test_show_models(self, m_get_collectors):
        m_to_string = mock.Mock(return_value="")
        m_get_collectors.return_value = {
            "test_model": mock.Mock(
                cluster_data_model=mock.Mock(to_string=m_to_string))}
        output = gmr.show_models()
        self.assertEqual(1, m_to_string.call_count)
        self.assertIn("", output)
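
# For context, show_models() is the kind of section generator that gets
# wired into oslo.reports' Guru Meditation Report machinery. A minimal
# sketch, assuming the usual oslo.reports entry points (setup_autorun /
# register_section); the section title here is an illustrative assumption,
# not necessarily the one Watcher registers.
from oslo_reports import guru_meditation_report as gmr_reports

from watcher.decision_engine import gmr as watcher_gmr
from watcher import version


def register_gmr_sections():
    # Dumping the cluster data models from a live decision engine helps
    # debugging; the GMR is triggered by signal or file, per oslo.reports.
    gmr_reports.TextGuruMeditation.setup_autorun(version)
    gmr_reports.TextGuruMeditation.register_section(
        'Cluster Data Models', watcher_gmr.show_models)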
python_watcher-14.0.0/watcher/tests/decision_engine/test_rpcapi.py

# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

import oslo_messaging as om

from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine import rpcapi
from watcher.tests import base


class TestDecisionEngineAPI(base.TestCase):

    api = rpcapi.DecisionEngineAPI()

    def test_get_api_version(self):
        with mock.patch.object(om.RPCClient, 'call') as mock_call:
            expected_context = self.context
            self.api.check_api_version(expected_context)
            mock_call.assert_called_once_with(
                expected_context, 'check_api_version',
                api_version=rpcapi.DecisionEngineAPI().api_version)

    def test_execute_audit_throw_exception(self):
        audit_uuid = "uuid"
        self.assertRaises(exception.InvalidUuidOrName,
                          self.api.trigger_audit, audit_uuid)

    def test_execute_audit_without_error(self):
        with mock.patch.object(om.RPCClient, 'cast') as mock_cast:
            audit_uuid = utils.generate_uuid()
            self.api.trigger_audit(self.context, audit_uuid)
            mock_cast.assert_called_once_with(
                self.context, 'trigger_audit', audit_uuid=audit_uuid)

    def test_get_strategy_info(self):
        with mock.patch.object(om.RPCClient, 'call') as mock_call:
            self.api.get_strategy_info(self.context, "dummy")
            mock_call.assert_called_once_with(
                self.context, 'get_strategy_info', strategy_name="dummy")

    def test_get_data_model_info(self):
        with mock.patch.object(om.RPCClient, 'call') as mock_call:
            self.api.get_data_model_info(
                self.context, data_model_type='compute', audit=None)
            mock_call.assert_called_once_with(
                self.context, 'get_data_model_info',
                data_model_type='compute', audit=None)
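
# A minimal sketch of the cast-vs-call split the RPC API tests above rely
# on: fire-and-forget operations go through oslo.messaging's cast(), while
# synchronous queries go through call(). The class below is illustrative
# only -- it assumes a pre-built oslo.messaging RPCClient is injected,
# whereas the real DecisionEngineAPI builds its own transport and target.
class SketchDecisionEngineAPI(object):

    def __init__(self, client):
        self.client = client  # an oslo.messaging RPCClient (assumed)

    def trigger_audit(self, context, audit_uuid):
        # Fire-and-forget: the caller does not wait for the audit to run.
        self.client.cast(context, 'trigger_audit', audit_uuid=audit_uuid)

    def get_strategy_info(self, context, strategy_name):
        # Synchronous query: the caller needs the result back.
        return self.client.call(context, 'get_strategy_info',
                                strategy_name=strategy_name)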
python_watcher-14.0.0/watcher/tests/decision_engine/test_scheduling.py

# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from apscheduler.schedulers import background
from apscheduler.triggers import interval as interval_trigger
import eventlet
from unittest import mock

from oslo_config import cfg
from oslo_utils import uuidutils

from watcher.decision_engine.loading import default as default_loading
from watcher.decision_engine import scheduling
from watcher.decision_engine.strategy.strategies import dummy_strategy
from watcher import notifications
from watcher import objects
from watcher.tests import base
from watcher.tests.db import base as db_base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.objects import utils as obj_utils


class TestCancelOngoingAudits(db_base.DbTestCase):

    def setUp(self):
        super(TestCancelOngoingAudits, self).setUp()
        p_audit_notifications = mock.patch.object(
            notifications, 'audit', autospec=True)
        self.m_audit_notifications = p_audit_notifications.start()
        self.addCleanup(p_audit_notifications.stop)

        self.goal = obj_utils.create_test_goal(
            self.context, id=1, name=dummy_strategy.DummyStrategy.get_name())
        self.strategy = obj_utils.create_test_strategy(
            self.context, name=dummy_strategy.DummyStrategy.get_name(),
            goal_id=self.goal.id)
        audit_template = obj_utils.create_test_audit_template(
            self.context, strategy_id=self.strategy.id)
        self.audit = obj_utils.create_test_audit(
            self.context,
            id=999,
            name='My Audit 999',
            uuid=uuidutils.generate_uuid(),
            audit_template_id=audit_template.id,
            goal_id=self.goal.id,
            audit_type=objects.audit.AuditType.ONESHOT.value,
            goal=self.goal,
            hostname='hostname1',
            state=objects.audit.State.ONGOING)
        cfg.CONF.set_override("host", "hostname1")

    @mock.patch.object(objects.audit.Audit, 'save')
    @mock.patch.object(objects.audit.Audit, 'list')
    def test_cancel_ongoing_audits(self, m_list, m_save):
        m_list.return_value = [self.audit]
        scheduler = scheduling.DecisionEngineSchedulingService()
        scheduler.cancel_ongoing_audits()
        m_list.assert_called()
        m_save.assert_called()
        self.assertEqual(self.audit.state, objects.audit.State.CANCELLED)


@mock.patch.object(objects.audit.Audit, 'save')
@mock.patch.object(objects.audit.Audit, 'list')
class TestDecisionEngineSchedulingService(base.TestCase):

    @mock.patch.object(
        default_loading.ClusterDataModelCollectorLoader, 'load')
    @mock.patch.object(
        default_loading.ClusterDataModelCollectorLoader, 'list_available')
    @mock.patch.object(background.BackgroundScheduler, 'start')
    def test_start_de_scheduling_service(self, m_start, m_list_available,
                                         m_load, m_list, m_save):
        m_list_available.return_value = {
            'fake': faker_cluster_state.FakerModelCollector}
        fake_collector = faker_cluster_state.FakerModelCollector(
            config=mock.Mock(period=777))
        m_load.return_value = fake_collector

        scheduler = scheduling.DecisionEngineSchedulingService()

        scheduler.start()

        m_start.assert_called_once_with(scheduler)
        jobs = scheduler.get_jobs()
        self.assertEqual(2, len(jobs))
        job = jobs[0]
        self.assertTrue(bool(fake_collector.cluster_data_model))
        self.assertIsInstance(job.trigger,
                              interval_trigger.IntervalTrigger)

    @mock.patch.object(
        default_loading.ClusterDataModelCollectorLoader, 'load')
    @mock.patch.object(
        default_loading.ClusterDataModelCollectorLoader, 'list_available')
    @mock.patch.object(background.BackgroundScheduler, 'start')
    def test_execute_sync_job_fails(self, m_start, m_list_available,
                                    m_load, m_list, m_save):
        fake_config = mock.Mock(period=.01)
        fake_collector = faker_cluster_state.FakerModelCollector(
            config=fake_config)
        fake_collector.synchronize = mock.Mock(
            side_effect=lambda: eventlet.sleep(.5))
        m_list_available.return_value = {
            'fake': faker_cluster_state.FakerModelCollector}
        m_load.return_value = fake_collector

        scheduler = scheduling.DecisionEngineSchedulingService()

        scheduler.start()

        m_start.assert_called_once_with(scheduler)
        jobs = scheduler.get_jobs()
        self.assertEqual(2, len(jobs))
        job = jobs[0]
        job.func()
        self.assertFalse(bool(fake_collector.cluster_data_model))
        self.assertIsInstance(job.trigger,
                              interval_trigger.IntervalTrigger)
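
# A minimal sketch of how a periodic model-synchronization job like the
# ones asserted on above can be registered with APScheduler. The period
# value and the job body are illustrative assumptions, not the scheduling
# service's actual configuration.
from apscheduler.schedulers import background as aps_background


def schedule_sync(collector, period=3600):
    scheduler = aps_background.BackgroundScheduler()
    # Registering with the 'interval' trigger yields an IntervalTrigger,
    # which is what the tests above check on job.trigger.
    scheduler.add_job(collector.synchronize, 'interval', seconds=period)
    scheduler.start()
    return scheduler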
python_watcher-14.0.0/watcher/tests/decision_engine/test_sync.py

# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from oslo_serialization import jsonutils

from watcher.common import context
from watcher.common import utils
from watcher.decision_engine.loading import default
from watcher.decision_engine import sync
from watcher import objects
from watcher.tests.db import base
from watcher.tests.decision_engine import fake_goals
from watcher.tests.decision_engine import fake_strategies


class TestSyncer(base.DbTestCase):

    def setUp(self):
        super(TestSyncer, self).setUp()
        self.ctx = context.make_context()

        # This mock simulates the strategies discovery done in discover()
        self.m_available_strategies = mock.Mock(return_value={
            fake_strategies.FakeDummy1Strategy1.get_name():
                fake_strategies.FakeDummy1Strategy1,
            fake_strategies.FakeDummy1Strategy2.get_name():
                fake_strategies.FakeDummy1Strategy2,
            fake_strategies.FakeDummy2Strategy3.get_name():
                fake_strategies.FakeDummy2Strategy3,
            fake_strategies.FakeDummy2Strategy4.get_name():
                fake_strategies.FakeDummy2Strategy4,
        })

        self.m_available_goals = mock.Mock(return_value={
            fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1,
            fake_goals.FakeDummy2.get_name(): fake_goals.FakeDummy2,
        })

        self.goal1_spec = fake_goals.FakeDummy1(
            config=mock.Mock()).get_efficacy_specification()
        self.goal2_spec = fake_goals.FakeDummy2(
            config=mock.Mock()).get_efficacy_specification()

        p_goals_load = mock.patch.object(
            default.DefaultGoalLoader, 'load',
            side_effect=lambda goal: self.m_available_goals()[goal]())
        p_goals = mock.patch.object(
            default.DefaultGoalLoader, 'list_available',
            self.m_available_goals)
        p_strategies = mock.patch.object(
            default.DefaultStrategyLoader, 'list_available',
            self.m_available_strategies)

        p_goals.start()
        p_goals_load.start()
        p_strategies.start()

        self.syncer = sync.Syncer()
        self.addCleanup(p_goals.stop)
        self.addCleanup(p_goals_load.stop)
        self.addCleanup(p_strategies.stop)

    @staticmethod
    def _find_created_modified_unmodified_ids(before, after):
        created = {
            a_item.id: a_item for a_item in after
            if a_item.uuid not in (b_item.uuid for b_item in before)
        }

        modified = {
            a_item.id: a_item for a_item in after
            if a_item.as_dict() not in (
                b_items.as_dict() for b_items in before)
        }

        unmodified = {
            a_item.id: a_item for a_item in after
            if a_item.as_dict() in (
                b_items.as_dict() for b_items in before)
        }

        return created, modified, unmodified

    @mock.patch.object(objects.Strategy, "soft_delete")
    @mock.patch.object(objects.Strategy, "save")
    @mock.patch.object(objects.Strategy, "create")
    @mock.patch.object(objects.Strategy, "list")
    @mock.patch.object(objects.Goal, "get_by_name")
    @mock.patch.object(objects.Goal, "soft_delete")
    @mock.patch.object(objects.Goal, "save")
    @mock.patch.object(objects.Goal, "create")
    @mock.patch.object(objects.Goal, "list")
    def test_sync_empty_db(
            self, m_g_list, m_g_create, m_g_save, m_g_soft_delete,
            m_g_get_by_name, m_s_list, m_s_create, m_s_save,
            m_s_soft_delete):
        m_g_get_by_name.side_effect = [
            objects.Goal(self.ctx, id=i) for i in range(1, 10)]
        m_g_list.return_value = []
        m_s_list.return_value = []
        self.syncer.sync()

        self.assertEqual(2, m_g_create.call_count)
        self.assertEqual(0, m_g_save.call_count)
        self.assertEqual(0, m_g_soft_delete.call_count)

        self.assertEqual(4, m_s_create.call_count)
        self.assertEqual(0, m_s_save.call_count)
        self.assertEqual(0, m_s_soft_delete.call_count)

    @mock.patch.object(objects.Strategy, "soft_delete")
    @mock.patch.object(objects.Strategy, "save")
    @mock.patch.object(objects.Strategy, "create")
    @mock.patch.object(objects.Strategy, "list")
    @mock.patch.object(objects.Goal, "get_by_name")
    @mock.patch.object(objects.Goal, "soft_delete")
    @mock.patch.object(objects.Goal, "save")
    @mock.patch.object(objects.Goal, "create")
    @mock.patch.object(objects.Goal, "list")
    def test_sync_with_existing_goal(
            self, m_g_list, m_g_create, m_g_save, m_g_soft_delete,
            m_g_get_by_name, m_s_list, m_s_create, m_s_save,
            m_s_soft_delete):
        m_g_get_by_name.side_effect = [
            objects.Goal(self.ctx, id=i) for i in range(1, 10)]
        m_g_list.return_value = [
            objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(),
                         name="dummy_1", display_name="Dummy 1",
                         efficacy_specification=(
                             self.goal1_spec.serialize_indicators_specs()))
        ]
        m_s_list.return_value = []
        self.syncer.sync()

        self.assertEqual(1, m_g_create.call_count)
        self.assertEqual(0, m_g_save.call_count)
        self.assertEqual(0, m_g_soft_delete.call_count)

        self.assertEqual(4, m_s_create.call_count)
        self.assertEqual(0, m_s_save.call_count)
        self.assertEqual(0, m_s_soft_delete.call_count)

    @mock.patch.object(objects.Strategy, "soft_delete")
    @mock.patch.object(objects.Strategy, "save")
    @mock.patch.object(objects.Strategy, "create")
    @mock.patch.object(objects.Strategy, "list")
    @mock.patch.object(objects.Goal, "get_by_name")
    @mock.patch.object(objects.Goal, "soft_delete")
    @mock.patch.object(objects.Goal, "save")
    @mock.patch.object(objects.Goal, "create")
    @mock.patch.object(objects.Goal, "list")
    def test_sync_with_existing_strategy(
            self, m_g_list, m_g_create, m_g_save, m_g_soft_delete,
            m_g_get_by_name, m_s_list, m_s_create, m_s_save,
            m_s_soft_delete):
        m_g_get_by_name.side_effect = [
            objects.Goal(self.ctx, id=i) for i in range(1, 10)]
        m_g_list.return_value = [
            objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(),
                         name="dummy_1", display_name="Dummy 1",
                         efficacy_specification=(
                             self.goal1_spec.serialize_indicators_specs()))
        ]
        m_s_list.return_value = [
            objects.Strategy(self.ctx, id=1, name="strategy_1",
                             goal_id=1, display_name="Strategy 1",
                             parameters_spec='{}')
        ]
        self.syncer.sync()

        self.assertEqual(1, m_g_create.call_count)
        self.assertEqual(0, m_g_save.call_count)
        self.assertEqual(0, m_g_soft_delete.call_count)

        self.assertEqual(3, m_s_create.call_count)
        self.assertEqual(0, m_s_save.call_count)
        self.assertEqual(0, m_s_soft_delete.call_count)

    @mock.patch.object(objects.Strategy, "soft_delete")
    @mock.patch.object(objects.Strategy, "save")
    @mock.patch.object(objects.Strategy, "create")
    @mock.patch.object(objects.Strategy, "list")
    @mock.patch.object(objects.Goal, "get_by_name")
    @mock.patch.object(objects.Goal, "soft_delete")
    @mock.patch.object(objects.Goal, "save")
    @mock.patch.object(objects.Goal, "create")
    @mock.patch.object(objects.Goal, "list")
    def test_sync_with_modified_goal(
            self, m_g_list, m_g_create, m_g_save, m_g_soft_delete,
            m_g_get_by_name, m_s_list, m_s_create, m_s_save,
            m_s_soft_delete):
        m_g_get_by_name.side_effect = [
            objects.Goal(self.ctx, id=i) for i in range(1, 10)]
        m_g_list.return_value = [objects.Goal(
            self.ctx, id=1, uuid=utils.generate_uuid(),
            name="dummy_2", display_name="original",
            efficacy_specification=self.goal2_spec.serialize_indicators_specs()
        )]
        m_s_list.return_value = []
        self.syncer.sync()

        self.assertEqual(2, m_g_create.call_count)
        self.assertEqual(0, m_g_save.call_count)
        self.assertEqual(1, m_g_soft_delete.call_count)

        self.assertEqual(4, m_s_create.call_count)
        self.assertEqual(0, m_s_save.call_count)
        self.assertEqual(0, m_s_soft_delete.call_count)

    @mock.patch.object(objects.Strategy, "soft_delete")
    @mock.patch.object(objects.Strategy, "save")
    @mock.patch.object(objects.Strategy, "create")
    @mock.patch.object(objects.Strategy, "list")
    @mock.patch.object(objects.Goal, "get_by_name")
    @mock.patch.object(objects.Goal, "soft_delete")
    @mock.patch.object(objects.Goal, "save")
    @mock.patch.object(objects.Goal, "create")
    @mock.patch.object(objects.Goal, "list")
    def test_sync_with_modified_strategy(
            self, m_g_list, m_g_create, m_g_save, m_g_soft_delete,
            m_g_get_by_name, m_s_list, m_s_create, m_s_save,
            m_s_soft_delete):
        m_g_get_by_name.side_effect = [
            objects.Goal(self.ctx, id=i) for i in range(1, 10)]
        m_g_list.return_value = [
            objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(),
                         name="dummy_1", display_name="Dummy 1",
                         efficacy_specification=(
                             self.goal1_spec.serialize_indicators_specs()))
        ]
        m_s_list.return_value = [
            objects.Strategy(self.ctx, id=1, name="strategy_1",
                             goal_id=1, display_name="original",
                             parameters_spec='{}')
        ]
        self.syncer.sync()

        self.assertEqual(1, m_g_create.call_count)
        self.assertEqual(0, m_g_save.call_count)
        self.assertEqual(0, m_g_soft_delete.call_count)

        self.assertEqual(4, m_s_create.call_count)
        self.assertEqual(0, m_s_save.call_count)
        self.assertEqual(1, m_s_soft_delete.call_count)

    def test_end2end_sync_goals_with_modified_goal_and_strategy(self):
        # ### Setup ### #

        # Here, we simulate goals and strategies already discovered in the
        # past that were saved in the DB

        # Should stay unmodified after sync()
        goal1 = objects.Goal(
            self.ctx, id=1, uuid=utils.generate_uuid(),
            name="dummy_1", display_name="Dummy 1",
            efficacy_specification=(
                self.goal1_spec.serialize_indicators_specs()))
        # Should be modified by the sync()
        goal2 = objects.Goal(
            self.ctx, id=2, uuid=utils.generate_uuid(),
            name="dummy_2", display_name="Original",
            efficacy_specification=self.goal2_spec.serialize_indicators_specs()
        )
        goal1.create()
        goal2.create()

        # Should stay unmodified after sync()
        strategy1 = objects.Strategy(
            self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(),
            display_name="Strategy 1", goal_id=goal1.id)
        # Should be modified after sync() because its related goal has been
        # modified
        strategy2 = objects.Strategy(
            self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(),
            display_name="Strategy 2", goal_id=goal2.id)
        # Should be modified after sync() because its strategy name has been
        # modified
        strategy3 = objects.Strategy(
            self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(),
            display_name="Original", goal_id=goal1.id)
        # Should be modified after sync() because both its related goal
        # and its strategy name have been modified
        strategy4 = objects.Strategy(
            self.ctx, id=4, name="strategy_4", uuid=utils.generate_uuid(),
            display_name="Original", goal_id=goal2.id)
        strategy1.create()
        strategy2.create()
        strategy3.create()
        strategy4.create()

        # Here we simulate audit_templates that were already created in the
        # past and hence saved within the Watcher DB

        # Should stay unmodified after sync()
        audit_template1 = objects.AuditTemplate(
            self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(),
            goal_id=goal1.id, strategy_id=strategy1.id)
        # Should be modified by the sync() because its associated goal
        # has been modified (compared to the defined fake goals)
        audit_template2 = objects.AuditTemplate(
            self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(),
            goal_id=goal2.id, strategy_id=strategy2.id)
        # Should be modified by the sync() because its associated strategy
        # has been modified (compared to the defined fake strategies)
        audit_template3 = objects.AuditTemplate(
            self.ctx, id=3, name="Synced AT3", uuid=utils.generate_uuid(),
            goal_id=goal1.id, strategy_id=strategy3.id)
        # Modified because both its associated goal and its associated
        # strategy have been modified
        audit_template4 = objects.AuditTemplate(
            self.ctx, id=4, name="Synced AT4", uuid=utils.generate_uuid(),
            goal_id=goal2.id, strategy_id=strategy4.id)
        audit_template1.create()
        audit_template2.create()
        audit_template3.create()
        audit_template4.create()

        # Should stay unmodified after sync()
        audit1 = objects.Audit(
            self.ctx, id=1, uuid=utils.generate_uuid(), name='audit_1',
            audit_type=objects.audit.AuditType.ONESHOT.value,
            state=objects.audit.State.PENDING,
            goal_id=goal1.id, strategy_id=strategy1.id,
            auto_trigger=False)
        # Should be modified by the sync() because its associated goal
        # has been modified (compared to the defined fake goals)
        audit2 = objects.Audit(
            self.ctx, id=2, uuid=utils.generate_uuid(), name='audit_2',
            audit_type=objects.audit.AuditType.ONESHOT.value,
            state=objects.audit.State.PENDING,
            goal_id=goal2.id, strategy_id=strategy2.id,
            auto_trigger=False)
        # Should be modified by the sync() because its associated strategy
        # has been modified (compared to the defined fake strategies)
        audit3 = objects.Audit(
            self.ctx, id=3, uuid=utils.generate_uuid(), name='audit_3',
            audit_type=objects.audit.AuditType.ONESHOT.value,
            state=objects.audit.State.PENDING,
            goal_id=goal1.id, strategy_id=strategy3.id,
            auto_trigger=False)
        # Modified because both its associated goal and its associated
        # strategy have been modified (compared to the defined fake
        # goals/strategies)
        audit4 = objects.Audit(
            self.ctx, id=4, uuid=utils.generate_uuid(), name='audit_4',
            audit_type=objects.audit.AuditType.ONESHOT.value,
            state=objects.audit.State.PENDING,
            goal_id=goal2.id, strategy_id=strategy4.id,
            auto_trigger=False)
        audit1.create()
        audit2.create()
        audit3.create()
        audit4.create()

        # Should stay unmodified after sync()
        action_plan1 = objects.ActionPlan(
            self.ctx, id=1, uuid=utils.generate_uuid(),
            audit_id=audit1.id, strategy_id=strategy1.id,
            state='DOESNOTMATTER', global_efficacy=[])
        # Stale after syncing because the goal of the audit has been modified
        # (compared to the defined fake goals)
        action_plan2 = objects.ActionPlan(
            self.ctx, id=2, uuid=utils.generate_uuid(),
            audit_id=audit2.id, strategy_id=strategy2.id,
            state='DOESNOTMATTER', global_efficacy=[])
        # Stale after syncing because the strategy has been modified
        # (compared to the defined fake strategies)
        action_plan3 = objects.ActionPlan(
            self.ctx, id=3, uuid=utils.generate_uuid(),
            audit_id=audit3.id, strategy_id=strategy3.id,
            state='DOESNOTMATTER', global_efficacy=[])
        # Stale after syncing because both the strategy and the related audit
        # have been modified (compared to the defined fake goals/strategies)
        action_plan4 = objects.ActionPlan(
            self.ctx, id=4, uuid=utils.generate_uuid(),
            audit_id=audit4.id, strategy_id=strategy4.id,
            state='DOESNOTMATTER', global_efficacy=[])
        action_plan1.create()
        action_plan2.create()
        action_plan3.create()
        action_plan4.create()

        before_goals = objects.Goal.list(self.ctx)
        before_strategies = objects.Strategy.list(self.ctx)
        before_audit_templates = objects.AuditTemplate.list(self.ctx)
        before_audits = objects.Audit.list(self.ctx)
        before_action_plans = objects.ActionPlan.list(self.ctx)

        # ### Action under test ### #

        try:
            self.syncer.sync()
        except Exception as exc:
            self.fail(exc)

        # ### Assertions ### #

        after_goals = objects.Goal.list(self.ctx)
        after_strategies = objects.Strategy.list(self.ctx)
        after_audit_templates = objects.AuditTemplate.list(self.ctx)
        after_audits = objects.Audit.list(self.ctx)
        after_action_plans = objects.ActionPlan.list(self.ctx)

        self.assertEqual(2, len(before_goals))
        self.assertEqual(4, len(before_strategies))
        self.assertEqual(4, len(before_audit_templates))
        self.assertEqual(4, len(before_audits))
        self.assertEqual(4, len(before_action_plans))

        self.assertEqual(2, len(after_goals))
        self.assertEqual(4, len(after_strategies))
        self.assertEqual(4, len(after_audit_templates))
        self.assertEqual(4, len(after_audits))
        self.assertEqual(4, len(after_action_plans))
        self.assertEqual(
            {"dummy_1", "dummy_2"},
            set([g.name for g in after_goals]))
        self.assertEqual(
            {"strategy_1", "strategy_2", "strategy_3", "strategy_4"},
            set([s.name for s in after_strategies]))

        created_goals, modified_goals, unmodified_goals = (
            self._find_created_modified_unmodified_ids(
                before_goals, after_goals))
        created_strategies, modified_strategies, unmodified_strategies = (
            self._find_created_modified_unmodified_ids(
                before_strategies, after_strategies))
        (created_audit_templates, modified_audit_templates,
         unmodified_audit_templates) = (
            self._find_created_modified_unmodified_ids(
                before_audit_templates, after_audit_templates))
        created_audits, modified_audits, unmodified_audits = (
            self._find_created_modified_unmodified_ids(
                before_audits, after_audits))
        (created_action_plans, modified_action_plans,
         unmodified_action_plans) = (
            self._find_created_modified_unmodified_ids(
                before_action_plans, after_action_plans))

        dummy_1_spec = jsonutils.loads(
            self.goal1_spec.serialize_indicators_specs())
        dummy_2_spec = jsonutils.loads(
            self.goal2_spec.serialize_indicators_specs())
        self.assertEqual(
            [dummy_1_spec, dummy_2_spec],
            [g.efficacy_specification for g in after_goals])

        self.assertEqual(1, len(created_goals))
        self.assertEqual(3, len(created_strategies))
        self.assertEqual(0, len(created_audits))
        self.assertEqual(0, len(created_action_plans))

        self.assertEqual(2, strategy2.goal_id)
        self.assertNotEqual(
            set([strategy2.id, strategy3.id, strategy4.id]),
            set(modified_strategies))
        self.assertEqual(set([strategy1.id]), set(unmodified_strategies))

        self.assertEqual(
            set([audit_template2.id, audit_template3.id,
                 audit_template4.id]),
            set(modified_audit_templates))
        self.assertEqual(set([audit_template1.id]),
                         set(unmodified_audit_templates))

        self.assertEqual(
            set([audit2.id, audit3.id, audit4.id]),
            set(modified_audits))
        self.assertEqual(set([audit1.id]), set(unmodified_audits))

        self.assertEqual(
            set([action_plan2.id, action_plan3.id, action_plan4.id]),
            set(modified_action_plans))
        self.assertTrue(
            all(ap.state == objects.action_plan.State.CANCELLED
                for ap in modified_action_plans.values()))
        self.assertEqual(set([action_plan1.id]),
                         set(unmodified_action_plans))

    def test_end2end_sync_goals_with_removed_goal_and_strategy(self):
        # ### Setup ### #

        # We simulate the fact that we removed 2 strategies
        self.m_available_strategies.return_value = {
            fake_strategies.FakeDummy1Strategy1.get_name():
                fake_strategies.FakeDummy1Strategy1
        }
        # We simulate the fact that we removed the dummy_2 goal
        self.m_available_goals.return_value = {
            fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1,
        }

        # Should stay unmodified after sync()
        goal1 = objects.Goal(
            self.ctx, id=1, uuid=utils.generate_uuid(),
            name="dummy_1", display_name="Dummy 1",
            efficacy_specification=self.goal1_spec.serialize_indicators_specs()
        )
        # To be removed by the sync()
        goal2 = objects.Goal(
            self.ctx, id=2, uuid=utils.generate_uuid(),
            name="dummy_2", display_name="Dummy 2",
            efficacy_specification=self.goal2_spec.serialize_indicators_specs()
        )
        goal1.create()
        goal2.create()

        # Should stay unmodified after sync()
        strategy1 = objects.Strategy(
            self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(),
            display_name="Strategy 1", goal_id=goal1.id)
        # To be removed by the sync() because the strategy entry point does
        # not exist anymore
        strategy2 = objects.Strategy(
            self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(),
            display_name="Strategy 2", goal_id=goal1.id)
        # To be removed by the sync() because the goal has been soft deleted
        # and because the strategy entry point does not exist anymore
        strategy3 = objects.Strategy(
            self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(),
            display_name="Original", goal_id=goal2.id)
        strategy1.create()
        strategy2.create()
        strategy3.create()

        # Here we simulate audit_templates that were already created in the
        # past and hence saved within the Watcher DB

        # The strategy of this audit template will be dereferenced
        # as it does not exist anymore
        audit_template1 = objects.AuditTemplate(
            self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(),
            goal_id=goal1.id, strategy_id=strategy1.id)
        # Stale after syncing because the goal has been soft deleted
        audit_template2 = objects.AuditTemplate(
            self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(),
            goal_id=goal2.id, strategy_id=strategy2.id)
        audit_template1.create()
        audit_template2.create()

        # Should stay unmodified after sync()
        audit1 = objects.Audit(
            self.ctx, id=1, uuid=utils.generate_uuid(), name='audit_1',
            audit_type=objects.audit.AuditType.ONESHOT.value,
            state=objects.audit.State.PENDING,
            goal_id=goal1.id, strategy_id=strategy1.id,
            auto_trigger=False)
        # Stale after syncing because the goal has been soft deleted
        audit2 = objects.Audit(
            self.ctx, id=2, uuid=utils.generate_uuid(), name='audit_2',
            audit_type=objects.audit.AuditType.ONESHOT.value,
            state=objects.audit.State.PENDING,
            goal_id=goal2.id, strategy_id=strategy2.id,
            auto_trigger=False)
        audit1.create()
        audit2.create()

        # Stale after syncing because its related strategy has been
        # soft deleted
        action_plan1 = objects.ActionPlan(
            self.ctx, id=1, uuid=utils.generate_uuid(),
            audit_id=audit1.id, strategy_id=strategy1.id,
            state='DOESNOTMATTER', global_efficacy=[])
        # Stale after syncing because its related goal has been soft deleted
        action_plan2 = objects.ActionPlan(
            self.ctx, id=2, uuid=utils.generate_uuid(),
            audit_id=audit2.id, strategy_id=strategy2.id,
            state='DOESNOTMATTER', global_efficacy=[])
        action_plan1.create()
        action_plan2.create()

        before_goals = objects.Goal.list(self.ctx)
        before_strategies = objects.Strategy.list(self.ctx)
        before_audit_templates = objects.AuditTemplate.list(self.ctx)
        before_audits = objects.Audit.list(self.ctx)
        before_action_plans = objects.ActionPlan.list(self.ctx)

        # ### Action under test ### #

        try:
            self.syncer.sync()
        except Exception as exc:
            self.fail(exc)

        # ### Assertions ### #

        after_goals = objects.Goal.list(self.ctx)
        after_strategies = objects.Strategy.list(self.ctx)
        after_audit_templates = objects.AuditTemplate.list(self.ctx)
        after_audits = objects.Audit.list(self.ctx)
        after_action_plans = objects.ActionPlan.list(self.ctx)

        self.assertEqual(2, len(before_goals))
        self.assertEqual(3, len(before_strategies))
        self.assertEqual(2, len(before_audit_templates))
        self.assertEqual(2, len(before_audits))
        self.assertEqual(2, len(before_action_plans))

        self.assertEqual(1, len(after_goals))
        self.assertEqual(1, len(after_strategies))
        self.assertEqual(2, len(after_audit_templates))
        self.assertEqual(2, len(after_audits))
        self.assertEqual(2, len(after_action_plans))
        self.assertEqual(
            {"dummy_1"},
            set([g.name for g in after_goals]))
        self.assertEqual(
            {"strategy_1"},
            set([s.name for s in after_strategies]))

        created_goals, modified_goals, unmodified_goals = (
            self._find_created_modified_unmodified_ids(
                before_goals, after_goals))
        created_strategies, modified_strategies, unmodified_strategies = (
            self._find_created_modified_unmodified_ids(
                before_strategies, after_strategies))
        (created_audit_templates, modified_audit_templates,
         unmodified_audit_templates) = (
            self._find_created_modified_unmodified_ids(
                before_audit_templates, after_audit_templates))
        created_audits, modified_audits, unmodified_audits = (
            self._find_created_modified_unmodified_ids(
                before_audits, after_audits))
        (created_action_plans, modified_action_plans,
         unmodified_action_plans) = (
            self._find_created_modified_unmodified_ids(
                before_action_plans, after_action_plans))

        self.assertEqual(0, len(created_goals))
        self.assertEqual(0, len(created_strategies))
        self.assertEqual(0, len(created_audits))
        self.assertEqual(0, len(created_action_plans))

        self.assertEqual(set([audit_template2.id]),
                         set(modified_audit_templates))
        self.assertEqual(set([audit_template1.id]),
                         set(unmodified_audit_templates))

        self.assertEqual(set([audit2.id]), set(modified_audits))
        self.assertEqual(set([audit1.id]), set(unmodified_audits))

        self.assertEqual(set([action_plan2.id]),
                         set(modified_action_plans))
        self.assertTrue(
            all(ap.state == objects.action_plan.State.CANCELLED
                for ap in modified_action_plans.values()))
        self.assertEqual(set([action_plan1.id]),
                         set(unmodified_action_plans))

    def test_sync_strategies_with_removed_goal(self):
        # ### Setup ### #

        goal1 = objects.Goal(
            self.ctx, id=1, uuid=utils.generate_uuid(),
            name="dummy_1", display_name="Dummy 1",
            efficacy_specification=self.goal1_spec.serialize_indicators_specs()
        )
        goal2 = objects.Goal(
            self.ctx, id=2, uuid=utils.generate_uuid(),
            name="dummy_2", display_name="Dummy 2",
            efficacy_specification=self.goal2_spec.serialize_indicators_specs()
        )
        goal1.create()
        goal2.create()
        strategy1 = objects.Strategy(
            self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(),
            display_name="Strategy 1", goal_id=goal1.id)
        strategy2 = objects.Strategy(
            self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(),
            display_name="Strategy 2", goal_id=goal2.id)
        strategy1.create()
        strategy2.create()
        # goal2 is removed for some reason
        goal2.soft_delete()

        before_goals = objects.Goal.list(self.ctx)
        before_strategies = objects.Strategy.list(self.ctx)

        # ### Action under test ### #

        try:
            self.syncer.sync()
        except Exception as exc:
            self.fail(exc)

        # ### Assertions ### #

        after_goals = objects.Goal.list(self.ctx)
        after_strategies = objects.Strategy.list(self.ctx)

        self.assertEqual(1, len(before_goals))
        self.assertEqual(2, len(before_strategies))
        self.assertEqual(2, len(after_goals))
        self.assertEqual(4, len(after_strategies))
        self.assertEqual(
            {"dummy_1", "dummy_2"},
            set([g.name for g in after_goals]))
        self.assertEqual(
            {"strategy_1", "strategy_2", "strategy_3", "strategy_4"},
            set([s.name for s in after_strategies]))
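
# A hypothetical sketch of the bookkeeping rule the end-to-end tests above
# encode. It is not the Syncer implementation: it only illustrates that a
# stale row (same name, different definition) is soft-deleted and
# re-created, which is why dependent audit templates, audits and action
# plans show up as "modified" after a sync. All names are illustrative.
def plan_sync(discovered, existing):
    """Partition plugin definitions against DB rows.

    :param discovered: dict mapping name -> definition (from the loaders)
    :param existing: dict mapping name -> definition (from the database)
    :return: dict with the names to soft-delete and to (re-)create
    """
    stale = {name for name, definition in existing.items()
             if name in discovered and discovered[name] != definition}
    removed = {name for name in existing if name not in discovered}
    created = {name for name in discovered
               if name not in existing or name in stale}
    return {'soft_delete': stale | removed, 'create': created}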
objects.Goal.list(self.ctx) before_strategies = objects.Strategy.list(self.ctx) # ### Action under test ### # try: self.syncer.sync() except Exception as exc: self.fail(exc) # ### Assertions ### # after_goals = objects.Goal.list(self.ctx) after_strategies = objects.Strategy.list(self.ctx) self.assertEqual(1, len(before_goals)) self.assertEqual(2, len(before_strategies)) self.assertEqual(2, len(after_goals)) self.assertEqual(4, len(after_strategies)) self.assertEqual( {"dummy_1", "dummy_2"}, set([g.name for g in after_goals])) self.assertEqual( {"strategy_1", "strategy_2", "strategy_3", "strategy_4"}, set([s.name for s in after_strategies])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/fake_policy.py0000664000175000017500000000400000000000000022305 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. policy_data = """ { "admin_api": "role:admin or role:administrator", "show_password": "!", "default": "rule:admin_api", "action:detail": "", "action:get": "", "action:get_all": "", "action_plan:delete": "", "action_plan:detail": "", "action_plan:get": "", "action_plan:get_all": "", "action_plan:update": "", "audit:create": "", "audit:delete": "", "audit:detail": "", "audit:get": "", "audit:get_all": "", "audit:update": "", "audit_template:create": "", "audit_template:delete": "", "audit_template:detail": "", "audit_template:get": "", "audit_template:get_all": "", "audit_template:update": "", "goal:detail": "", "goal:get": "", "goal:get_all": "", "scoring_engine:detail": "", "scoring_engine:get": "", "scoring_engine:get_all": "", "strategy:detail": "", "strategy:get": "", "strategy:get_all": "", "strategy:state": "", "service:detail": "", "service:get": "", "service:get_all": "", "data_model:get_all": "" } """ policy_data_compat_juno = """ { "admin": "role:admin or role:administrator", "admin_api": "is_admin:True", "default": "rule:admin_api" } """ def get_policy_data(compat): if not compat: return policy_data elif compat == 'juno': return policy_data_compat_juno else: raise Exception('Policy data for %s not available' % compat) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/fakes.py0000664000175000017500000001004400000000000021116 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
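# The fakes below stand in for Pecan requests/responses, keystone auth
# headers and ``requests`` responses in the unit tests. A minimal usage
# sketch for the FakeResponse class defined at the end of this module
# (illustrative only; it relies on standard requests.Response semantics,
# where truthiness follows the status code):
#
#     ok = FakeResponse(status_code=200)
#     missing = FakeResponse(status_code=404)
#     assert ok.ok and bool(ok)                     # 2xx evaluates truthy
#     assert not missing.ok and not bool(missing)   # 4xx evaluates falsy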
import requests from unittest import mock fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', 'X-Roles': u'admin, ResellerAdmin, _member_', 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', 'X-Project-Name': 'test', 'X-User-Name': 'test', 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', 'X-Service-Catalog': u'{test: 12345}', 'X-Identity-Status': 'Confirmed', 'X-User-Domain-Name': 'domain', 'X-Project-Domain-Id': 'project_domain_id', 'X-User-Domain-Id': 'user_domain_id', } class FakePecanRequest(mock.Mock): def __init__(self, **kwargs): super(FakePecanRequest, self).__init__(**kwargs) self.host_url = 'http://test_url:8080/test' self.context = {} self.body = '' self.content_type = 'text/unicode' self.params = {} self.path = '/v1/services' self.headers = fakeAuthTokenHeaders self.environ = {} def __setitem__(self, index, value): setattr(self, index, value) class FakePecanResponse(mock.Mock): def __init__(self, **kwargs): super(FakePecanResponse, self).__init__(**kwargs) self.status = None class FakeApp(object): pass class FakeService(mock.Mock): def __init__(self, **kwargs): super(FakeService, self).__init__(**kwargs) self.__tablename__ = 'service' self.__resource__ = 'services' self.user_id = 'fake user id' self.project_id = 'fake project id' self.uuid = 'test_uuid' self.id = 8 self.name = 'james' self.service_type = 'not_this' self.description = 'amazing' self.tags = ['this', 'and that'] self.read_only = True def as_dict(self): return dict(service_type=self.service_type, user_id=self.user_id, project_id=self.project_id, uuid=self.uuid, id=self.id, name=self.name, tags=self.tags, read_only=self.read_only, description=self.description) class FakeAuthProtocol(mock.Mock): def __init__(self, **kwargs): super(FakeAuthProtocol, self).__init__(**kwargs) self.app = FakeApp() self.config = '' class FakeResponse(requests.Response): def __init__(self, status_code, content=None, headers=None): """A requests.Response that can be used as a mock return_value. A key feature is that the instance will evaluate to True or False like a real Response, based on the status_code. Properties like ok, status_code, text, and content, and methods like json(), work as expected based on the inputs. :param status_code: Integer HTTP response code (200, 404, etc.) :param content: String supplying the payload content of the response. Using a json-encoded string will make the json() method behave as expected. :param headers: Dict of HTTP header values to set. """ super(FakeResponse, self).__init__() self.status_code = status_code if content: self._content = content if headers: self.headers = headers ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6591353 python_watcher-14.0.0/watcher/tests/notifications/0000775000175000017500000000000000000000000022325 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/notifications/__init__.py0000664000175000017500000000000000000000000024424 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/notifications/test_action_notification.py0000664000175000017500000005367100000000000027775 0ustar00zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import freezegun from unittest import mock import oslo_messaging as om from watcher.common import exception from watcher.common import rpc from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionNotification(base.DbTestCase): def setUp(self): super(TestActionNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier self.goal = utils.create_test_goal(mock.Mock()) self.strategy = utils.create_test_strategy(mock.Mock()) self.audit = utils.create_test_audit(mock.Mock(), strategy_id=self.strategy.id) self.action_plan = utils.create_test_action_plan(mock.Mock()) def test_send_invalid_action_plan(self): action_plan = utils.get_test_action_plan( mock.Mock(), state='DOESNOTMATTER', audit_id=1) self.assertRaises( exception.InvalidActionPlan, notifications.action_plan.send_update, mock.MagicMock(), action_plan, host='node0') def test_send_action_update(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.ONGOING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_update( mock.MagicMock(), action, host='node0', old_state=objects.action.State.PENDING) # The 1st notification is because we created the object. # The 2nd notification is because we created the action plan object. 
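        # The 3rd and 4th come from creating the action object just above
        # and from the send_update() call under test, which is the one
        # inspected below.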
self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionUpdatePayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state_update': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionStateUpdatePayload', 'watcher_object.data': { 'old_state': 'PENDING', 'state': 'ONGOING' } }, 'state': 'ONGOING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } }, payload ) def test_send_action_plan_create(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.PENDING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_create(mock.MagicMock(), action, host='node0') self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionCreatePayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'PENDING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } }, payload ) def test_send_action_delete(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.DELETED, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_delete(mock.MagicMock(), action, host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. 
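        # The 3rd comes from creating the action above; the 4th is the
        # delete notification under test.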
self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionDeletePayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'DELETED', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } }, payload ) def test_send_action_execution(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.PENDING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_execution_notification( mock.MagicMock(), action, 'execution', phase='start', host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.execution.start', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionExecutionPayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': None, 'updated_at': None, 'state': 'PENDING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) def test_send_action_execution_with_error(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.FAILED, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action.send_execution_notification( mock.MagicMock(), action, 'execution', phase='error', host='node0', priority='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.execution.error', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionExecutionPayload', 
'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': { 'watcher_object.data': { 'exception': u'WatcherException', 'exception_message': u'TEST', 'function_name': ( 'test_send_action_execution_with_error'), 'module_name': ( 'watcher.tests.notifications.' 'test_action_notification') }, 'watcher_object.name': 'ExceptionPayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, 'updated_at': None, 'state': 'FAILED', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) def test_send_action_cancel(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.PENDING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_cancel_notification( mock.MagicMock(), action, 'cancel', phase='start', host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.cancel.start', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionCancelPayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': None, 'updated_at': None, 'state': 'PENDING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) def test_send_action_cancel_with_error(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.FAILED, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action.send_cancel_notification( mock.MagicMock(), action, 'cancel', phase='error', host='node0', priority='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.cancel.error', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionCancelPayload', 
'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': { 'watcher_object.data': { 'exception': u'WatcherException', 'exception_message': u'TEST', 'function_name': ( 'test_send_action_cancel_with_error'), 'module_name': ( 'watcher.tests.notifications.' 'test_action_notification') }, 'watcher_object.name': 'ExceptionPayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, 'updated_at': None, 'state': 'FAILED', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/notifications/test_action_plan_notification.py0000664000175000017500000007026000000000000031000 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import freezegun from unittest import mock import oslo_messaging as om from watcher.common import exception from watcher.common import rpc from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionPlanNotification(base.DbTestCase): def setUp(self): super(TestActionPlanNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier self.goal = utils.create_test_goal(mock.Mock()) self.audit = utils.create_test_audit(mock.Mock(), interval=None) self.strategy = utils.create_test_strategy(mock.Mock()) def test_send_invalid_action_plan(self): action_plan = utils.get_test_action_plan( mock.Mock(), state='DOESNOTMATTER', audit_id=1) self.assertRaises( exception.InvalidActionPlan, notifications.action_plan.send_update, mock.MagicMock(), action_plan, host='node0') def test_send_action_plan_update(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) notifications.action_plan.send_update( mock.MagicMock(), action_plan, host='node0', old_state=objects.action_plan.State.PENDING) # The 1st notification is because we created the object. 
# The 2nd notification is because we created the action plan object. self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "deleted_at": None, "state": "ONGOING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.name": "ActionPlanStateUpdatePayload" }, }, "watcher_object.name": "ActionPlanUpdatePayload" }, payload ) def test_send_action_plan_create(self): action_plan = utils.get_test_action_plan( mock.Mock(), state=objects.action_plan.State.PENDING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit.as_dict(), strategy=self.strategy.as_dict()) notifications.action_plan.send_create( mock.MagicMock(), action_plan, host='node0') self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, 
"deleted_at": None, "state": "PENDING", "updated_at": None, "created_at": None, }, "watcher_object.name": "ActionPlanCreatePayload" }, payload ) def test_send_action_plan_delete(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.DELETED, audit_id=self.audit.id, strategy_id=self.strategy.id) notifications.action_plan.send_delete( mock.MagicMock(), action_plan, host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "deleted_at": None, "state": "DELETED", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", }, "watcher_object.name": "ActionPlanDeletePayload" }, payload ) def test_send_action_plan_action(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) notifications.action_plan.send_action_notification( mock.MagicMock(), action_plan, host='node0', action='execution', phase='start') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.execution.start", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": None, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.2", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" } }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_action_plan_action_with_error(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action_plan.send_action_notification( mock.MagicMock(), action_plan, host='node0', action='execution', priority='error', phase='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.execution.error", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": ( "test_send_action_plan_action_with_error"), "module_name": "watcher.tests.notifications." 
"test_action_plan_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_action_plan_cancel(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) notifications.action_plan.send_cancel_notification( mock.MagicMock(), action_plan, host='node0', action='cancel', phase='start') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.cancel.start", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": None, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.2", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", 'name': 'My Audit', "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" } }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_action_plan_cancel_with_error(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action_plan.send_cancel_notification( mock.MagicMock(), action_plan, host='node0', action='cancel', priority='error', phase='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.cancel.error", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": ( "test_send_action_plan_cancel_with_error"), "module_name": "watcher.tests.notifications." 
"test_action_plan_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", 'name': 'My Audit', "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/notifications/test_audit_notification.py0000664000175000017500000005276000000000000027624 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import freezegun from unittest import mock import oslo_messaging as om from watcher.common import exception from watcher.common import rpc from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestAuditNotification(base.DbTestCase): def setUp(self): super(TestAuditNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier self.goal = utils.create_test_goal(mock.Mock()) self.strategy = utils.create_test_strategy(mock.Mock()) def test_send_invalid_audit(self): audit = utils.get_test_audit( mock.Mock(), interval=None, state='DOESNOTMATTER', goal_id=1) self.assertRaises( exception.InvalidAudit, notifications.audit.send_update, mock.MagicMock(), audit, host='node0') def test_send_audit_update_with_strategy(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal, strategy=self.strategy) notifications.audit.send_update( mock.MagicMock(), audit, host='node0', old_state=objects.audit.State.PENDING) # The 1st notification is because we created the object. self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "deleted_at": None, "scope": [], "state": "ONGOING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.name": "AuditStateUpdatePayload" }, "audit_type": "ONESHOT" }, "watcher_object.name": "AuditUpdatePayload" }, payload ) def test_send_audit_update_without_strategy(self): audit = utils.get_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, goal=self.goal) notifications.audit.send_update( mock.MagicMock(), audit, host='node0', old_state=objects.audit.State.PENDING) self.assertEqual(1, 
self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "strategy_uuid": None, "strategy": None, "deleted_at": None, "scope": [], "state": "ONGOING", "updated_at": None, "created_at": None, "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.name": "AuditStateUpdatePayload" }, "audit_type": "ONESHOT" }, "watcher_object.name": "AuditUpdatePayload" }, payload ) def test_send_audit_create(self): audit = utils.get_test_audit( mock.Mock(), interval=None, state=objects.audit.State.PENDING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal.as_dict(), strategy=self.strategy.as_dict()) notifications.audit.send_create( mock.MagicMock(), audit, host='node0') self.assertEqual(1, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": None, "audit_type": "ONESHOT" }, "watcher_object.name": "AuditCreatePayload" }, payload ) def test_send_audit_delete(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.DELETED, goal_id=self.goal.id, strategy_id=self.strategy.id) notifications.audit.send_delete( mock.MagicMock(), audit, host='node0') # The 1st notification is because we created the object. 
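        # The 2nd is the delete notification sent just above and inspected
        # below.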
self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "deleted_at": None, "scope": [], "state": "DELETED", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "AuditDeletePayload" }, payload ) def test_send_audit_action(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal, strategy=self.strategy) notifications.audit.send_action_notification( mock.MagicMock(), audit, host='node0', action='strategy', phase='start') # The 1st notification is because we created the object. 
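        # The 2nd is the audit.strategy.start notification emitted by the
        # call under test.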
        self.assertEqual(2, self.m_notifier.info.call_count)
        notification = self.m_notifier.info.call_args[1]
        self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id)
        self.assertDictEqual(
            {
                "event_type": "audit.strategy.start",
                "payload": {
                    "watcher_object.data": {
                        "audit_type": "ONESHOT",
                        "created_at": "2016-10-18T09:52:05Z",
                        "deleted_at": None,
                        "fault": None,
                        "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652",
                        "goal": {
                            "watcher_object.data": {
                                "created_at": "2016-10-18T09:52:05Z",
                                "deleted_at": None,
                                "display_name": "test goal",
                                "efficacy_specification": [],
                                "name": "TEST",
                                "updated_at": None,
                                "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652"
                            },
                            "watcher_object.name": "GoalPayload",
                            "watcher_object.namespace": "watcher",
                            "watcher_object.version": "1.0"
                        },
                        "interval": None,
                        "next_run_time": None,
                        "auto_trigger": False,
                        "name": "My Audit",
                        "parameters": {},
                        "scope": [],
                        "state": "ONGOING",
                        "strategy_uuid": (
                            "cb3d0b58-4415-4d90-b75b-1e96878730e3"),
                        "strategy": {
                            "watcher_object.data": {
                                "created_at": "2016-10-18T09:52:05Z",
                                "deleted_at": None,
                                "display_name": "test strategy",
                                "name": "TEST",
                                "parameters_spec": {},
                                "updated_at": None,
                                "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3"
                            },
                            "watcher_object.name": "StrategyPayload",
                            "watcher_object.namespace": "watcher",
                            "watcher_object.version": "1.0"
                        },
                        "updated_at": None,
                        "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d"
                    },
                    "watcher_object.name": "AuditActionPayload",
                    "watcher_object.namespace": "watcher",
                    "watcher_object.version": "1.1"
                }
            },
            notification
        )

    def test_send_audit_action_with_error(self):
        audit = utils.create_test_audit(
            mock.Mock(), interval=None, state=objects.audit.State.ONGOING,
            goal_id=self.goal.id, strategy_id=self.strategy.id,
            goal=self.goal, strategy=self.strategy)

        try:
            # This is to load the exception in sys.exc_info()
            raise exception.WatcherException("TEST")
        except exception.WatcherException:
            notifications.audit.send_action_notification(
                mock.MagicMock(), audit, host='node0',
                action='strategy', priority='error', phase='error')

        self.assertEqual(1, self.m_notifier.error.call_count)
        notification = self.m_notifier.error.call_args[1]
        self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id)
        self.assertDictEqual(
            {
                "event_type": "audit.strategy.error",
                "payload": {
                    "watcher_object.data": {
                        "audit_type": "ONESHOT",
                        "created_at": "2016-10-18T09:52:05Z",
                        "deleted_at": None,
                        "fault": {
                            "watcher_object.data": {
                                "exception": "WatcherException",
                                "exception_message": "TEST",
                                "function_name": (
                                    "test_send_audit_action_with_error"),
                                "module_name": "watcher.tests.notifications."
"test_audit_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test goal", "efficacy_specification": [], "name": "TEST", "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" }, "watcher_object.name": "GoalPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "interval": None, "next_run_time": None, "auto_trigger": False, "name": "My Audit", "parameters": {}, "scope": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" }, "watcher_object.name": "AuditActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/notifications/test_notification.py0000664000175000017500000003610200000000000026426 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from unittest import mock from oslo_versionedobjects import fixture from watcher.common import exception from watcher.common import rpc from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields from watcher.tests import base as testbase from watcher.tests.objects import test_objects class TestNotificationBase(testbase.TestCase): @base.WatcherObjectRegistry.register_if(False) class TestObject(base.WatcherObject): VERSION = '1.0' fields = { 'field_1': wfields.StringField(), 'field_2': wfields.IntegerField(), 'not_important_field': wfields.IntegerField(), } @base.WatcherObjectRegistry.register_if(False) class TestNotificationPayload(notificationbase.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), } fields = { 'extra_field': wfields.StringField(), # filled by ctor 'field_1': wfields.StringField(), # filled by the schema 'field_2': wfields.IntegerField(), # filled by the schema } def populate_schema(self, source_field): super(TestNotificationBase.TestNotificationPayload, self).populate_schema(source_field=source_field) @base.WatcherObjectRegistry.register_if(False) class TestNotificationPayloadEmptySchema( notificationbase.NotificationPayloadBase): VERSION = '1.0' fields = { 'extra_field': wfields.StringField(), # filled by ctor } @notificationbase.notification_sample('test-update-1.json') @notificationbase.notification_sample('test-update-2.json') @base.WatcherObjectRegistry.register_if(False) class TestNotification(notificationbase.NotificationBase): VERSION = '1.0' fields = { 'payload': wfields.ObjectField('TestNotificationPayload') } @base.WatcherObjectRegistry.register_if(False) class TestNotificationEmptySchema(notificationbase.NotificationBase): VERSION = '1.0' fields = { 'payload': wfields.ObjectField( 'TestNotificationPayloadEmptySchema') } expected_payload = { 'watcher_object.name': 'TestNotificationPayload', 'watcher_object.data': { 'extra_field': 'test string', 'field_1': 'test1', 'field_2': 42}, 'watcher_object.version': '1.0', 'watcher_object.namespace': 'watcher'} def setUp(self): super(TestNotificationBase, self).setUp() self.my_obj = self.TestObject(field_1='test1', field_2=42, not_important_field=13) self.payload = self.TestNotificationPayload( extra_field='test string') self.payload.populate_schema(source_field=self.my_obj) self.notification = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE, phase=wfields.NotificationPhase.START), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) def _verify_notification(self, mock_notifier, mock_context, expected_event_type, expected_payload): mock_notifier.prepare.assert_called_once_with( publisher_id='watcher-fake:fake-host') mock_notify = mock_notifier.prepare.return_value.info self.assertTrue(mock_notify.called) self.assertEqual(mock_notify.call_args[0][0], mock_context) self.assertEqual(mock_notify.call_args[1]['event_type'], expected_event_type) actual_payload = mock_notify.call_args[1]['payload'] self.assertEqual(expected_payload, actual_payload) @mock.patch.object(rpc, 'NOTIFIER') def test_emit_notification(self, mock_notifier): mock_context = mock.Mock() mock_context.to_dict.return_value = {} self.notification.emit(mock_context) self._verify_notification( mock_notifier, 
mock_context, expected_event_type='test_object.update.start', expected_payload=self.expected_payload) @mock.patch.object(rpc, 'NOTIFIER') def test_no_emit_notifs_disabled(self, mock_notifier): # Make sure notifications aren't emitted when notification_level # isn't defined, indicating notifications should be disabled self.config(notification_level=None) notif = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE, phase=wfields.NotificationPhase.START), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() notif.emit(mock_context) self.assertFalse(mock_notifier.called) @mock.patch.object(rpc, 'NOTIFIER') def test_no_emit_level_too_low(self, mock_notifier): # Make sure notification doesn't emit when set notification # level < config level self.config(notification_level='warning') notif = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE, phase=wfields.NotificationPhase.START), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() notif.emit(mock_context) self.assertFalse(mock_notifier.called) @mock.patch.object(rpc, 'NOTIFIER') def test_emit_event_type_without_phase(self, mock_notifier): noti = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload=self.expected_payload) @mock.patch.object(rpc, 'NOTIFIER') def test_not_possible_to_emit_if_not_populated(self, mock_notifier): non_populated_payload = self.TestNotificationPayload( extra_field='test string') noti = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() self.assertRaises(exception.NotificationPayloadError, noti.emit, mock_context) self.assertFalse(mock_notifier.called) @mock.patch.object(rpc, 'NOTIFIER') def test_empty_schema(self, mock_notifier): non_populated_payload = self.TestNotificationPayloadEmptySchema( extra_field='test string') noti = self.TestNotificationEmptySchema( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload={ 'watcher_object.name': 'TestNotificationPayloadEmptySchema', 'watcher_object.data': {'extra_field': 'test string'}, 'watcher_object.version': '1.0', 'watcher_object.namespace': 'watcher'}) def 
test_sample_decorator(self): self.assertEqual(2, len(self.TestNotification.samples)) self.assertIn('test-update-1.json', self.TestNotification.samples) self.assertIn('test-update-2.json', self.TestNotification.samples) expected_notification_fingerprints = { 'EventType': '1.3-bc4f4bc4a497d789e5a3c30f921edae1', 'ExceptionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ExceptionPayload': '1.0-4516ae282a55fe2fd5c754967ee6248b', 'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545', 'TerseAuditPayload': '1.2-0fda1751c39f29b539944c2b44690f65', 'AuditPayload': '1.2-d30cc1639404ed380b0742b781db690e', 'AuditStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'AuditUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditUpdatePayload': '1.1-e32c3f69c353d47948afa44359246828', 'AuditCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditCreatePayload': '1.1-d30cc1639404ed380b0742b781db690e', 'AuditDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditDeletePayload': '1.1-d30cc1639404ed380b0742b781db690e', 'AuditActionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditActionPayload': '1.1-3d19c75dd9cdf2a833d0367b234e20d2', 'GoalPayload': '1.0-fa1fecb8b01dd047eef808ded4d50d1a', 'StrategyPayload': '1.0-94f01c137b083ac236ae82573c1fcfc1', 'ActionPlanActionPayload': '1.1-5be9fa7ca9e544322bdded5593e36edb', 'ActionPlanCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanCreatePayload': '1.1-6a3c3bf1d1f822e33633c49088699d4e', 'ActionPlanDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanDeletePayload': '1.1-6a3c3bf1d1f822e33633c49088699d4e', 'ActionPlanPayload': '1.1-6a3c3bf1d1f822e33633c49088699d4e', 'ActionPlanStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'ActionPlanUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanUpdatePayload': '1.1-4ecd6571784cec2656725003ce431fdd', 'ActionPlanActionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanCancelNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionCancelNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionCreatePayload': '1.0-519b93b7450319d8928b4b6e6362df31', 'ActionDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionDeletePayload': '1.0-519b93b7450319d8928b4b6e6362df31', 'ActionExecutionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionExecutionPayload': '1.0-bff9f820a2abf7bb6d7027b7450157df', 'ActionPayload': '1.0-519b93b7450319d8928b4b6e6362df31', 'ActionStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'ActionUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionUpdatePayload': '1.0-03306c7e7f4d49ac328c261eff6b30b8', 'ActionPlanCancelPayload': '1.1-5be9fa7ca9e544322bdded5593e36edb', 'ActionCancelPayload': '1.0-bff9f820a2abf7bb6d7027b7450157df', 'TerseActionPlanPayload': '1.1-63008f013817407df9194c2a59fda6b0', 'ServiceUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ServicePayload': '1.0-9c5a9bc51e6606e0ec3cf95baf698f4f', 'ServiceStatusUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'ServiceUpdatePayload': '1.0-e0e9812a45958974693a723a2c820c3f' } class TestNotificationObjectVersions(testbase.TestCase): def setUp(self): super(TestNotificationObjectVersions, self).setUp() base.WatcherObjectRegistry.register_notification_objects() def test_versions(self): checker = fixture.ObjectVersionChecker( test_objects.get_watcher_objects()) 
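        # The ObjectVersionChecker fixture fingerprints every registered
        # object from its fields and remotable methods, so any change to a
        # notification object surfaces here as a hash mismatch. The
        # notification fingerprints are merged with the regular object
        # fingerprints from test_objects before the comparison runs.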
expected_notification_fingerprints.update( test_objects.expected_object_fingerprints) expected, actual = checker.test_hashes( expected_notification_fingerprints) self.assertEqual(expected, actual, 'Some notification objects have changed; please make ' 'sure the versions have been bumped, and then update ' 'their hashes here.') def test_notification_payload_version_depends_on_the_schema(self): @base.WatcherObjectRegistry.register_if(False) class TestNotificationPayload( notificationbase.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), } fields = { 'extra_field': wfields.StringField(), # filled by ctor 'field_1': wfields.StringField(), # filled by the schema 'field_2': wfields.IntegerField(), # filled by the schema } checker = fixture.ObjectVersionChecker( {'TestNotificationPayload': (TestNotificationPayload,)}) old_hash = checker.get_hashes(extra_data_func=get_extra_data) TestNotificationPayload.SCHEMA['field_3'] = ('source_field', 'field_3') new_hash = checker.get_hashes(extra_data_func=get_extra_data) self.assertNotEqual(old_hash, new_hash) def get_extra_data(obj_class): extra_data = tuple() # Get the SCHEMA items to add to the fingerprint # if we are looking at a notification if issubclass(obj_class, notificationbase.NotificationPayloadBase): schema_data = collections.OrderedDict( sorted(obj_class.SCHEMA.items())) extra_data += (schema_data,) return extra_data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/notifications/test_service_notifications.py0000664000175000017500000000557300000000000030341 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
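# Editor's sketch (hypothetical helper, not part of the original module):
# the assertions in this file check watcher's versioned-notification
# envelope, where every payload is wrapped in watcher_object.* keys. The
# function below only illustrates that envelope shape; the key names mirror
# the payload asserted in the test further down.


def _envelope_sketch(name, data, version='1.0', namespace='watcher'):
    # Wrap object fields the way the serialized notification payload nests
    # them under 'watcher_object.data' next to name/namespace/version keys.
    return {
        'watcher_object.name': name,
        'watcher_object.namespace': namespace,
        'watcher_object.version': version,
        'watcher_object.data': data,
    }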
from unittest import mock import freezegun import oslo_messaging as om from oslo_utils import timeutils from watcher.common import rpc from watcher import notifications from watcher.objects import service as w_service from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionPlanNotification(base.DbTestCase): def setUp(self): super(TestActionPlanNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier def test_service_failed(self): service = utils.get_test_service(mock.Mock(), created_at=timeutils.utcnow()) state = w_service.ServiceStatus.FAILED notifications.service.send_service_update(mock.MagicMock(), service, state, host='node0') notification = self.m_notifier.warning.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual({ 'watcher_object.data': { 'last_seen_up': '2016-09-22T08:32:06Z', 'name': 'watcher-service', 'sevice_host': 'controller', 'status_update': { 'watcher_object.data': { 'old_state': 'ACTIVE', 'state': 'FAILED' }, 'watcher_object.name': 'ServiceStatusUpdatePayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' } }, 'watcher_object.name': 'ServiceUpdatePayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, payload ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591600.6591353 python_watcher-14.0.0/watcher/tests/objects/0000775000175000017500000000000000000000000021105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/__init__.py0000664000175000017500000000000000000000000023204 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_action.py0000664000175000017500000002404300000000000023776 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
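# Editor's sketch: the test classes below carry a `scenarios` attribute,
# the pattern handled by the testscenarios library, which runs every test
# method once per (name, attributes) pair with the attributes set on the
# instance. The minimal, self-contained example below uses hypothetical
# names and exists only to illustrate the mechanism; it is not part of the
# watcher test suite.
import testscenarios


class _EagerFlagSketch(testscenarios.TestWithScenarios):
    # Two scenarios -> each test method runs twice, once with
    # self.eager False and once with it True.
    scenarios = [
        ('non_eager', dict(eager=False)),
        ('eager', dict(eager=True)),
    ]

    def test_eager_flag_is_injected(self):
        self.assertIn(self.eager, (True, False))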
import datetime from unittest import mock from oslo_utils import timeutils from watcher.common import exception from watcher.common import utils as c_utils from watcher.db.sqlalchemy import api as db_api from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestActionObject(base.DbTestCase): action_plan_id = 2 scenarios = [ ('non_eager', dict( eager=False, fake_action=utils.get_test_action( action_plan_id=action_plan_id))), ('eager_with_non_eager_load', dict( eager=True, fake_action=utils.get_test_action( action_plan_id=action_plan_id))), ('eager_with_eager_load', dict( eager=True, fake_action=utils.get_test_action( action_plan_id=action_plan_id, action_plan=utils.get_test_action_plan(id=action_plan_id)))), ] def setUp(self): super(TestActionObject, self).setUp() p_action_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_notifications = p_action_notifications.start() self.addCleanup(p_action_notifications.stop) self.m_send_update = self.m_action_notifications.send_update self.fake_action_plan = utils.create_test_action_plan( id=self.action_plan_id) def eager_action_assert(self, action): if self.eager: self.assertIsNotNone(action.action_plan) fields_to_check = set( super(objects.ActionPlan, objects.ActionPlan).fields ).symmetric_difference(objects.ActionPlan.fields) db_data = { k: v for k, v in self.fake_action_plan.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in action.action_plan.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_action_by_id') def test_get_by_id(self, mock_get_action): mock_get_action.return_value = self.fake_action action_id = self.fake_action['id'] action = objects.Action.get(self.context, action_id, eager=self.eager) mock_get_action.assert_called_once_with( self.context, action_id, eager=self.eager) self.assertEqual(self.context, action._context) self.eager_action_assert(action) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_get_by_uuid(self, mock_get_action): mock_get_action.return_value = self.fake_action uuid = self.fake_action['uuid'] action = objects.Action.get(self.context, uuid, eager=self.eager) mock_get_action.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, action._context) self.assertEqual(0, self.m_send_update.call_count) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Action.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_action_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_action] actions = objects.Action.list(self.context, eager=self.eager) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(actions)) self.assertIsInstance(actions[0], objects.Action) self.assertEqual(self.context, actions[0]._context) for action in actions: self.eager_action_assert(action) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(objects.Strategy, 'get') @mock.patch.object(objects.Audit, 'get') @mock.patch.object(db_api.Connection, 'update_action') @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_save(self, mock_get_action, mock_update_action, mock_get_audit, mock_get_strategy): mock_get_action.return_value = self.fake_action fake_saved_action = 
self.fake_action.copy() mock_get_audit.return_value = mock.PropertyMock( uuid=c_utils.generate_uuid()) mock_get_strategy.return_value = mock.PropertyMock( uuid=c_utils.generate_uuid()) fake_saved_action['updated_at'] = timeutils.utcnow() mock_update_action.return_value = fake_saved_action uuid = self.fake_action['uuid'] action = objects.Action.get_by_uuid( self.context, uuid, eager=self.eager) action.state = objects.action.State.SUCCEEDED if not self.eager: self.assertRaises(exception.EagerlyLoadedActionRequired, action.save) else: action.save() expected_update_at = fake_saved_action['updated_at'].replace( tzinfo=datetime.timezone.utc) mock_get_action.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_action.assert_called_once_with( uuid, {'state': objects.action.State.SUCCEEDED}) self.assertEqual(self.context, action._context) self.assertEqual(expected_update_at, action.updated_at) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_refresh(self, mock_get_action): returns = [dict(self.fake_action, state="first state"), dict(self.fake_action, state="second state")] mock_get_action.side_effect = returns uuid = self.fake_action['uuid'] expected = [mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] action = objects.Action.get(self.context, uuid, eager=self.eager) self.assertEqual("first state", action.state) action.refresh(eager=self.eager) self.assertEqual("second state", action.state) self.assertEqual(expected, mock_get_action.call_args_list) self.assertEqual(self.context, action._context) self.eager_action_assert(action) self.assertEqual(0, self.m_send_update.call_count) class TestCreateDeleteActionObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteActionObject, self).setUp() self.fake_strategy = utils.create_test_strategy(name="DUMMY") self.fake_audit = utils.create_test_audit() self.fake_action_plan = utils.create_test_action_plan() self.fake_action = utils.get_test_action( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'create_action') def test_create(self, mock_create_action): mock_create_action.return_value = self.fake_action action = objects.Action(self.context, **self.fake_action) action.create() expected_action = self.fake_action.copy() expected_action['created_at'] = expected_action['created_at'].replace( tzinfo=datetime.timezone.utc) mock_create_action.assert_called_once_with(expected_action) self.assertEqual(self.context, action._context) @mock.patch.object(notifications.action, 'send_delete') @mock.patch.object(notifications.action, 'send_update') @mock.patch.object(db_api.Connection, 'update_action') @mock.patch.object(db_api.Connection, 'soft_delete_action') @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_soft_delete(self, mock_get_action, mock_soft_delete_action, mock_update_action, mock_send_update, mock_send_delete): mock_get_action.return_value = self.fake_action fake_deleted_action = self.fake_action.copy() fake_deleted_action['deleted_at'] = timeutils.utcnow() mock_soft_delete_action.return_value = fake_deleted_action mock_update_action.return_value = fake_deleted_action expected_action = fake_deleted_action.copy() expected_action['created_at'] = expected_action['created_at'].replace( tzinfo=datetime.timezone.utc) expected_action['deleted_at'] = expected_action['deleted_at'].replace( tzinfo=datetime.timezone.utc) del expected_action['action_plan'] uuid = self.fake_action['uuid'] 
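        # Soft-deleting below is expected to mark the row deleted
        # (deleted_at set), push the state to DELETED through update_action,
        # and drop the eagerly loaded 'action_plan' key from the dict view,
        # matching expected_action as built above from the fake DB values.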
action = objects.Action.get_by_uuid(self.context, uuid) action.soft_delete() mock_get_action.assert_called_once_with( self.context, uuid, eager=False) mock_soft_delete_action.assert_called_once_with(uuid) mock_update_action.assert_called_once_with( uuid, {'state': objects.action.State.DELETED}) self.assertEqual(self.context, action._context) self.assertEqual(expected_action, action.as_dict()) @mock.patch.object(db_api.Connection, 'destroy_action') @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_destroy(self, mock_get_action, mock_destroy_action): mock_get_action.return_value = self.fake_action uuid = self.fake_action['uuid'] action = objects.Action.get_by_uuid(self.context, uuid) action.destroy() mock_get_action.assert_called_once_with( self.context, uuid, eager=False) mock_destroy_action.assert_called_once_with(uuid) self.assertEqual(self.context, action._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_action_description.py0000664000175000017500000001311300000000000026375 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 ZTE # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_utils import timeutils from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestActionDescriptionObject(base.DbTestCase): def setUp(self): super(TestActionDescriptionObject, self).setUp() self.fake_action_desc = utils.get_test_action_desc( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_get_by_id(self, mock_get_action_desc): action_desc_id = self.fake_action_desc['id'] mock_get_action_desc.return_value = self.fake_action_desc action_desc = objects.ActionDescription.get( self.context, action_desc_id) mock_get_action_desc.assert_called_once_with( self.context, action_desc_id) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'get_action_description_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_action_desc] action_desc = objects.ActionDescription.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(action_desc)) self.assertIsInstance(action_desc[0], objects.ActionDescription) self.assertEqual(self.context, action_desc[0]._context) @mock.patch.object(db_api.Connection, 'create_action_description') def test_create(self, mock_create_action_desc): mock_create_action_desc.return_value = self.fake_action_desc action_desc = objects.ActionDescription( self.context, **self.fake_action_desc) action_desc.create() expected_action_desc = self.fake_action_desc.copy() expected_action_desc['created_at'] = expected_action_desc[ 'created_at'].replace(tzinfo=datetime.timezone.utc) mock_create_action_desc.assert_called_once_with(expected_action_desc) 
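        # The fake DB layer returns a naive created_at timestamp while the
        # object view is expected to be timezone-aware, hence the
        # tzinfo=datetime.timezone.utc replacement applied above.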
self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'update_action_description') @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_save(self, mock_get_action_desc, mock_update_action_desc): mock_get_action_desc.return_value = self.fake_action_desc fake_saved_action_desc = self.fake_action_desc.copy() fake_saved_action_desc['updated_at'] = timeutils.utcnow() mock_update_action_desc.return_value = fake_saved_action_desc _id = self.fake_action_desc['id'] action_desc = objects.ActionDescription.get(self.context, _id) action_desc.description = 'This is a test' action_desc.save() mock_get_action_desc.assert_called_once_with(self.context, _id) mock_update_action_desc.assert_called_once_with( _id, {'description': 'This is a test'}) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_refresh(self, mock_get_action_desc): returns = [dict(self.fake_action_desc, description="Test message1"), dict(self.fake_action_desc, description="Test message2")] mock_get_action_desc.side_effect = returns _id = self.fake_action_desc['id'] expected = [mock.call(self.context, _id), mock.call(self.context, _id)] action_desc = objects.ActionDescription.get(self.context, _id) self.assertEqual("Test message1", action_desc.description) action_desc.refresh() self.assertEqual("Test message2", action_desc.description) self.assertEqual(expected, mock_get_action_desc.call_args_list) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'soft_delete_action_description') @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_soft_delete(self, mock_get_action_desc, mock_soft_delete): mock_get_action_desc.return_value = self.fake_action_desc fake_deleted_action_desc = self.fake_action_desc.copy() fake_deleted_action_desc['deleted_at'] = timeutils.utcnow() mock_soft_delete.return_value = fake_deleted_action_desc expected_action_desc = fake_deleted_action_desc.copy() expected_action_desc['created_at'] = expected_action_desc[ 'created_at'].replace(tzinfo=datetime.timezone.utc) expected_action_desc['deleted_at'] = expected_action_desc[ 'deleted_at'].replace(tzinfo=datetime.timezone.utc) _id = self.fake_action_desc['id'] action_desc = objects.ActionDescription.get(self.context, _id) action_desc.soft_delete() mock_get_action_desc.assert_called_once_with(self.context, _id) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, action_desc._context) self.assertEqual(expected_action_desc, action_desc.as_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_action_plan.py0000664000175000017500000003462600000000000025020 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
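# Editor's sketch (hypothetical names, standalone; not part of the original
# module): the tests below stack several mock.patch.object decorators.
# Decorators apply bottom-up, so the one closest to the function supplies
# the first mock argument after self. Calling _patch_order_sketch() with no
# arguments passes, because patch injects both mocks for the call.
from unittest import mock as _mock_sketch


class _Db(object):
    def get(self):
        return 'real-get'

    def update(self):
        return 'real-update'


@_mock_sketch.patch.object(_Db, 'update')  # outermost -> last argument
@_mock_sketch.patch.object(_Db, 'get')     # innermost -> first argument
def _patch_order_sketch(m_get, m_update):
    # Both methods are replaced with MagicMocks for the duration of the call.
    assert isinstance(m_get, _mock_sketch.MagicMock)
    assert isinstance(m_update, _mock_sketch.MagicMock)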
import datetime from unittest import mock from oslo_utils import timeutils from watcher.common import exception from watcher.common import utils as common_utils from watcher import conf from watcher.db.sqlalchemy import api as db_api from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils CONF = conf.CONF class TestActionPlanObject(base.DbTestCase): audit_id = 2 strategy_id = 2 scenarios = [ ('non_eager', dict( eager=False, fake_action_plan=utils.get_test_action_plan( created_at=timeutils.utcnow(), audit_id=audit_id, strategy_id=strategy_id))), ('eager_with_non_eager_load', dict( eager=True, fake_action_plan=utils.get_test_action_plan( created_at=timeutils.utcnow(), audit_id=audit_id, strategy_id=strategy_id))), ('eager_with_eager_load', dict( eager=True, fake_action_plan=utils.get_test_action_plan( created_at=timeutils.utcnow(), strategy_id=strategy_id, strategy=utils.get_test_strategy(id=strategy_id), audit_id=audit_id, audit=utils.get_test_audit(id=audit_id)))), ] def setUp(self): super(TestActionPlanObject, self).setUp() p_action_plan_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_plan_notifications = p_action_plan_notifications.start() self.addCleanup(p_action_plan_notifications.stop) self.m_send_update = self.m_action_plan_notifications.send_update self.fake_audit = utils.create_test_audit(id=self.audit_id) self.fake_strategy = utils.create_test_strategy( id=self.strategy_id, name="DUMMY") def eager_load_action_plan_assert(self, action_plan): if self.eager: self.assertIsNotNone(action_plan.audit) fields_to_check = set( super(objects.Audit, objects.Audit).fields ).symmetric_difference(objects.Audit.fields) db_data = { k: v for k, v in self.fake_audit.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in action_plan.audit.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_action_plan_by_id') def test_get_by_id(self, mock_get_action_plan): mock_get_action_plan.return_value = self.fake_action_plan action_plan_id = self.fake_action_plan['id'] action_plan = objects.ActionPlan.get( self.context, action_plan_id, eager=self.eager) mock_get_action_plan.assert_called_once_with( self.context, action_plan_id, eager=self.eager) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') def test_get_by_uuid(self, mock_get_action_plan): mock_get_action_plan.return_value = self.fake_action_plan uuid = self.fake_action_plan['uuid'] action_plan = objects.ActionPlan.get( self.context, uuid, eager=self.eager) mock_get_action_plan.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) self.assertEqual(0, self.m_send_update.call_count) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.ActionPlan.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_action_plan_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_action_plan] action_plans = objects.ActionPlan.list(self.context, eager=self.eager) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(action_plans)) self.assertIsInstance(action_plans[0], 
objects.ActionPlan) self.assertEqual(self.context, action_plans[0]._context) for action_plan in action_plans: self.eager_load_action_plan_assert(action_plan) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'update_action_plan') @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') def test_save(self, mock_get_action_plan, mock_update_action_plan): mock_get_action_plan.return_value = self.fake_action_plan fake_saved_action_plan = self.fake_action_plan.copy() fake_saved_action_plan['state'] = objects.action_plan.State.SUCCEEDED fake_saved_action_plan['updated_at'] = timeutils.utcnow() mock_update_action_plan.return_value = fake_saved_action_plan expected_action_plan = fake_saved_action_plan.copy() expected_action_plan[ 'created_at'] = expected_action_plan['created_at'].replace( tzinfo=datetime.timezone.utc) expected_action_plan[ 'updated_at'] = expected_action_plan['updated_at'].replace( tzinfo=datetime.timezone.utc) uuid = self.fake_action_plan['uuid'] action_plan = objects.ActionPlan.get_by_uuid( self.context, uuid, eager=self.eager) action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() mock_get_action_plan.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_action_plan.assert_called_once_with( uuid, {'state': objects.action_plan.State.SUCCEEDED}) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) self.m_send_update.assert_called_once_with( self.context, action_plan, old_state=self.fake_action_plan['state']) self.assertEqual( {k: v for k, v in expected_action_plan.items() if k not in action_plan.object_fields}, {k: v for k, v in action_plan.as_dict().items() if k not in action_plan.object_fields}) @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') def test_refresh(self, mock_get_action_plan): returns = [dict(self.fake_action_plan, state="first state"), dict(self.fake_action_plan, state="second state")] mock_get_action_plan.side_effect = returns uuid = self.fake_action_plan['uuid'] expected = [mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] action_plan = objects.ActionPlan.get( self.context, uuid, eager=self.eager) self.assertEqual("first state", action_plan.state) action_plan.refresh(eager=self.eager) self.assertEqual("second state", action_plan.state) self.assertEqual(expected, mock_get_action_plan.call_args_list) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) class TestCreateDeleteActionPlanObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteActionPlanObject, self).setUp() p_action_plan_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_plan_notifications = p_action_plan_notifications.start() self.addCleanup(p_action_plan_notifications.stop) self.m_send_update = self.m_action_plan_notifications.send_update self.fake_strategy = utils.create_test_strategy(name="DUMMY") self.fake_audit = utils.create_test_audit() self.fake_action_plan = utils.get_test_action_plan( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'create_action_plan') def test_create(self, mock_create_action_plan): mock_create_action_plan.return_value = self.fake_action_plan action_plan = objects.ActionPlan( self.context, **self.fake_action_plan) action_plan.create() expected_action_plan = self.fake_action_plan.copy() expected_action_plan['created_at'] = expected_action_plan[ 
'created_at'].replace(tzinfo=datetime.timezone.utc) mock_create_action_plan.assert_called_once_with(expected_action_plan) self.assertEqual(self.context, action_plan._context) @mock.patch.multiple( db_api.Connection, get_action_plan_by_uuid=mock.DEFAULT, soft_delete_action_plan=mock.DEFAULT, update_action_plan=mock.DEFAULT, get_efficacy_indicator_list=mock.DEFAULT, soft_delete_efficacy_indicator=mock.DEFAULT, ) def test_soft_delete(self, get_action_plan_by_uuid, soft_delete_action_plan, update_action_plan, get_efficacy_indicator_list, soft_delete_efficacy_indicator): efficacy_indicator = utils.get_test_efficacy_indicator( action_plan_id=self.fake_action_plan['id']) uuid = self.fake_action_plan['uuid'] m_get_action_plan = get_action_plan_by_uuid m_soft_delete_action_plan = soft_delete_action_plan m_get_efficacy_indicator_list = get_efficacy_indicator_list m_soft_delete_efficacy_indicator = soft_delete_efficacy_indicator m_update_action_plan = update_action_plan m_get_action_plan.return_value = self.fake_action_plan fake_deleted_action_plan = self.fake_action_plan.copy() fake_deleted_action_plan['deleted_at'] = timeutils.utcnow() m_update_action_plan.return_value = fake_deleted_action_plan m_soft_delete_action_plan.return_value = fake_deleted_action_plan expected_action_plan = fake_deleted_action_plan.copy() expected_action_plan['created_at'] = expected_action_plan[ 'created_at'].replace(tzinfo=datetime.timezone.utc) expected_action_plan['deleted_at'] = expected_action_plan[ 'deleted_at'].replace(tzinfo=datetime.timezone.utc) del expected_action_plan['audit'] del expected_action_plan['strategy'] m_get_efficacy_indicator_list.return_value = [efficacy_indicator] action_plan = objects.ActionPlan.get_by_uuid( self.context, uuid, eager=False) action_plan.soft_delete() m_get_action_plan.assert_called_once_with( self.context, uuid, eager=False) m_get_efficacy_indicator_list.assert_called_once_with( self.context, filters={"action_plan_uuid": uuid}, limit=None, marker=None, sort_dir=None, sort_key=None) m_soft_delete_action_plan.assert_called_once_with(uuid) m_soft_delete_efficacy_indicator.assert_called_once_with( efficacy_indicator['uuid']) m_update_action_plan.assert_called_once_with( uuid, {'state': objects.action_plan.State.DELETED}) self.assertEqual(self.context, action_plan._context) self.assertEqual(expected_action_plan, action_plan.as_dict()) @mock.patch.multiple( db_api.Connection, get_action_plan_by_uuid=mock.DEFAULT, destroy_action_plan=mock.DEFAULT, get_efficacy_indicator_list=mock.DEFAULT, destroy_efficacy_indicator=mock.DEFAULT, ) def test_destroy(self, get_action_plan_by_uuid, destroy_action_plan, get_efficacy_indicator_list, destroy_efficacy_indicator): m_get_action_plan = get_action_plan_by_uuid m_destroy_action_plan = destroy_action_plan m_get_efficacy_indicator_list = get_efficacy_indicator_list m_destroy_efficacy_indicator = destroy_efficacy_indicator efficacy_indicator = utils.get_test_efficacy_indicator( action_plan_id=self.fake_action_plan['id']) uuid = self.fake_action_plan['uuid'] m_get_action_plan.return_value = self.fake_action_plan m_get_efficacy_indicator_list.return_value = [efficacy_indicator] action_plan = objects.ActionPlan.get_by_uuid(self.context, uuid) action_plan.destroy() m_get_action_plan.assert_called_once_with( self.context, uuid, eager=False) m_get_efficacy_indicator_list.assert_called_once_with( self.context, filters={"action_plan_uuid": uuid}, limit=None, marker=None, sort_dir=None, sort_key=None) m_destroy_action_plan.assert_called_once_with(uuid) 
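        # destroy() cascades: once the action plan row is removed, every
        # efficacy indicator fetched for the plan's uuid must be destroyed
        # as well, which the next assertion checks per indicator uuid.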
m_destroy_efficacy_indicator.assert_called_once_with( efficacy_indicator['uuid']) self.assertEqual(self.context, action_plan._context) @mock.patch.object(notifications.action_plan, 'send_update', mock.Mock()) class TestStateManager(base.DbTestCase): def setUp(self): super(TestStateManager, self).setUp() self.state_manager = objects.action_plan.StateManager() def test_check_expired(self): CONF.set_default('action_plan_expiry', 0, group='watcher_decision_engine') strategy_1 = utils.create_test_strategy( uuid=common_utils.generate_uuid()) audit_1 = utils.create_test_audit( uuid=common_utils.generate_uuid()) action_plan_1 = utils.create_test_action_plan( state=objects.action_plan.State.RECOMMENDED, uuid=common_utils.generate_uuid(), audit_id=audit_1.id, strategy_id=strategy_1.id) self.state_manager.check_expired(self.context) action_plan = objects.action_plan.ActionPlan.get_by_uuid( self.context, action_plan_1.uuid) self.assertEqual(objects.action_plan.State.SUPERSEDED, action_plan.state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_audit.py0000664000175000017500000003372600000000000023637 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
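# Editor's sketch (made-up field names, standalone; not watcher objects):
# the eager-load helper in this file compares only the fields a subclass
# declares on top of its parent, computed with set.symmetric_difference
# over the two `fields` mappings. The assertion below shows the set
# arithmetic involved when the parent's fields are a subset of the child's.
_parent_fields = {'created_at', 'updated_at', 'deleted_at'}
_child_fields = _parent_fields | {'uuid', 'name'}
# symmetric_difference keeps keys present in exactly one of the two sets,
# i.e. exactly the fields the child adds itself.
assert _parent_fields.symmetric_difference(_child_fields) == {'uuid', 'name'}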
import datetime from unittest import mock from oslo_utils import timeutils from watcher.common import exception from watcher.common import rpc from watcher.common import utils as w_utils from watcher.db.sqlalchemy import api as db_api from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils from watcher.tests.objects import utils as objutils class TestAuditObject(base.DbTestCase): goal_id = 2 goal_data = utils.get_test_goal( id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") scenarios = [ ('non_eager', dict( eager=False, fake_audit=utils.get_test_audit( created_at=timeutils.utcnow(), goal_id=goal_id))), ('eager_with_non_eager_load', dict( eager=True, fake_audit=utils.get_test_audit( created_at=timeutils.utcnow(), goal_id=goal_id))), ('eager_with_eager_load', dict( eager=True, fake_audit=utils.get_test_audit( created_at=timeutils.utcnow(), goal_id=goal_id, goal=goal_data))), ] def setUp(self): super(TestAuditObject, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.m_send_update = self.m_audit_notifications.send_update self.fake_goal = utils.create_test_goal(**self.goal_data) def eager_load_audit_assert(self, audit, goal): if self.eager: self.assertIsNotNone(audit.goal) fields_to_check = set( super(objects.Goal, objects.Goal).fields ).symmetric_difference(objects.Goal.fields) db_data = { k: v for k, v in goal.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in audit.goal.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_audit_by_id') def test_get_by_id(self, mock_get_audit): mock_get_audit.return_value = self.fake_audit audit_id = self.fake_audit['id'] audit = objects.Audit.get(self.context, audit_id, eager=self.eager) mock_get_audit.assert_called_once_with( self.context, audit_id, eager=self.eager) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_get_by_uuid(self, mock_get_audit): mock_get_audit.return_value = self.fake_audit uuid = self.fake_audit['uuid'] audit = objects.Audit.get(self.context, uuid, eager=self.eager) mock_get_audit.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) self.assertEqual(0, self.m_send_update.call_count) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Audit.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_audit_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_audit] audits = objects.Audit.list(self.context, eager=self.eager) mock_get_list.assert_called_once_with( self.context, eager=self.eager, filters=None, limit=None, marker=None, sort_dir=None, sort_key=None) self.assertEqual(1, len(audits)) self.assertIsInstance(audits[0], objects.Audit) self.assertEqual(self.context, audits[0]._context) for audit in audits: self.eager_load_audit_assert(audit, self.fake_goal) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def 
test_save(self, mock_get_audit, mock_update_audit): mock_get_audit.return_value = self.fake_audit fake_saved_audit = self.fake_audit.copy() fake_saved_audit['state'] = objects.audit.State.SUCCEEDED fake_saved_audit['updated_at'] = timeutils.utcnow() mock_update_audit.return_value = fake_saved_audit expected_audit = fake_saved_audit.copy() expected_audit['created_at'] = expected_audit['created_at'].replace( tzinfo=datetime.timezone.utc) expected_audit['updated_at'] = expected_audit['updated_at'].replace( tzinfo=datetime.timezone.utc) uuid = self.fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=self.eager) audit.state = objects.audit.State.SUCCEEDED audit.save() mock_get_audit.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_audit.assert_called_once_with( uuid, {'state': objects.audit.State.SUCCEEDED}) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) self.m_send_update.assert_called_once_with( self.context, audit, old_state=self.fake_audit['state']) self.assertEqual( {k: v for k, v in expected_audit.items() if k not in audit.object_fields}, {k: v for k, v in audit.as_dict().items() if k not in audit.object_fields}) @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_refresh(self, mock_get_audit): returns = [dict(self.fake_audit, state="first state"), dict(self.fake_audit, state="second state")] mock_get_audit.side_effect = returns uuid = self.fake_audit['uuid'] expected = [ mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] audit = objects.Audit.get(self.context, uuid, eager=self.eager) self.assertEqual("first state", audit.state) audit.refresh(eager=self.eager) self.assertEqual("second state", audit.state) self.assertEqual(expected, mock_get_audit.call_args_list) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) class TestCreateDeleteAuditObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteAuditObject, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.m_send_update = self.m_audit_notifications.send_update self.goal_id = 1 self.goal = utils.create_test_goal(id=self.goal_id, name="DUMMY") self.fake_audit = utils.get_test_audit( goal_id=self.goal_id, created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'create_audit') def test_create(self, mock_create_audit): mock_create_audit.return_value = self.fake_audit audit = objects.Audit(self.context, **self.fake_audit) audit.create() expected_audit = self.fake_audit.copy() expected_audit['created_at'] = expected_audit['created_at'].replace( tzinfo=datetime.timezone.utc) mock_create_audit.assert_called_once_with(expected_audit) self.assertEqual(self.context, audit._context) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'soft_delete_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_soft_delete(self, mock_get_audit, mock_soft_delete_audit, mock_update_audit): mock_get_audit.return_value = self.fake_audit fake_deleted_audit = self.fake_audit.copy() fake_deleted_audit['deleted_at'] = timeutils.utcnow() mock_soft_delete_audit.return_value = fake_deleted_audit mock_update_audit.return_value = fake_deleted_audit expected_audit = fake_deleted_audit.copy() expected_audit['created_at'] = 
expected_audit['created_at'].replace( tzinfo=datetime.timezone.utc) expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( tzinfo=datetime.timezone.utc) del expected_audit['goal'] del expected_audit['strategy'] uuid = self.fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=False) audit.soft_delete() mock_get_audit.assert_called_once_with(self.context, uuid, eager=False) mock_soft_delete_audit.assert_called_once_with(uuid) mock_update_audit.assert_called_once_with(uuid, {'state': 'DELETED'}) self.assertEqual(self.context, audit._context) self.assertEqual(expected_audit, audit.as_dict()) @mock.patch.object(db_api.Connection, 'destroy_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_destroy(self, mock_get_audit, mock_destroy_audit): mock_get_audit.return_value = self.fake_audit uuid = self.fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid) audit.destroy() mock_get_audit.assert_called_once_with( self.context, uuid, eager=False) mock_destroy_audit.assert_called_once_with(uuid) self.assertEqual(self.context, audit._context) class TestAuditObjectSendNotifications(base.DbTestCase): def setUp(self): super(TestAuditObjectSendNotifications, self).setUp() goal_id = 1 self.fake_goal = utils.create_test_goal(id=goal_id, name="DUMMY") self.fake_strategy = utils.create_test_strategy( id=goal_id, name="DUMMY") self.fake_audit = utils.get_test_audit( goal_id=goal_id, goal=utils.get_test_goal(id=goal_id), strategy_id=self.fake_strategy.id, strategy=self.fake_strategy) p_get_notifier = mock.patch.object(rpc, 'get_notifier') self.m_get_notifier = p_get_notifier.start() self.m_get_notifier.return_value = mock.Mock(name='m_notifier') self.m_notifier = self.m_get_notifier.return_value self.addCleanup(p_get_notifier.stop) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_send_update_notification(self, m_get_audit, m_update_audit): fake_audit = utils.get_test_audit( goal=self.fake_goal.as_dict(), strategy_id=self.fake_strategy.id, strategy=self.fake_strategy.as_dict()) m_get_audit.return_value = fake_audit fake_saved_audit = self.fake_audit.copy() fake_saved_audit['state'] = objects.audit.State.SUCCEEDED m_update_audit.return_value = fake_saved_audit uuid = fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=True) audit.state = objects.audit.State.ONGOING audit.save() self.assertEqual(1, self.m_notifier.info.call_count) self.assertEqual('audit.update', self.m_notifier.info.call_args[1]['event_type']) @mock.patch.object(db_api.Connection, 'create_audit') def test_send_create_notification(self, m_create_audit): audit = objutils.get_test_audit( self.context, id=1, goal_id=self.fake_goal.id, strategy_id=self.fake_strategy.id, goal=self.fake_goal.as_dict(), strategy=self.fake_strategy.as_dict()) m_create_audit.return_value = audit audit.create() self.assertEqual(1, self.m_notifier.info.call_count) self.assertEqual('audit.create', self.m_notifier.info.call_args[1]['event_type']) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'soft_delete_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_send_delete_notification( self, m_get_audit, m_soft_delete_audit, m_update_audit): fake_audit = utils.get_test_audit( goal=self.fake_goal.as_dict(), strategy_id=self.fake_strategy.id, strategy=self.fake_strategy.as_dict()) m_get_audit.return_value = fake_audit fake_deleted_audit = 
self.fake_audit.copy() fake_deleted_audit['deleted_at'] = timeutils.utcnow() expected_audit = fake_deleted_audit.copy() expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( tzinfo=datetime.timezone.utc) m_soft_delete_audit.return_value = fake_deleted_audit m_update_audit.return_value = fake_deleted_audit uuid = fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=True) audit.soft_delete() self.assertEqual(2, self.m_notifier.info.call_count) self.assertEqual( 'audit.update', self.m_notifier.info.call_args_list[0][1]['event_type']) self.assertEqual( 'audit.delete', self.m_notifier.info.call_args_list[1][1]['event_type']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_audit_template.py0000664000175000017500000002465600000000000025534 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_utils import timeutils from watcher.common import exception from watcher.common import utils as w_utils from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestAuditTemplateObject(base.DbTestCase): goal_id = 1 goal_data = utils.get_test_goal( id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") scenarios = [ ('non_eager', dict( eager=False, fake_audit_template=utils.get_test_audit_template( created_at=timeutils.utcnow(), goal_id=goal_id))), ('eager_with_non_eager_load', dict( eager=True, fake_audit_template=utils.get_test_audit_template( created_at=timeutils.utcnow(), goal_id=goal_id))), ('eager_with_eager_load', dict( eager=True, fake_audit_template=utils.get_test_audit_template( created_at=timeutils.utcnow(), goal_id=goal_id, goal=goal_data))), ] def setUp(self): super(TestAuditTemplateObject, self).setUp() self.fake_goal = utils.create_test_goal(**self.goal_data) def eager_load_audit_template_assert(self, audit_template, goal): if self.eager: self.assertIsNotNone(audit_template.goal) fields_to_check = set( super(objects.Goal, objects.Goal).fields ).symmetric_difference(objects.Goal.fields) db_data = { k: v for k, v in goal.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in audit_template.goal.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_audit_template_by_id') def test_get_by_id(self, mock_get_audit_template): mock_get_audit_template.return_value = self.fake_audit_template audit_template_id = self.fake_audit_template['id'] audit_template = objects.AuditTemplate.get( self.context, audit_template_id, eager=self.eager) mock_get_audit_template.assert_called_once_with( self.context, audit_template_id, eager=self.eager) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) 
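    # objects.AuditTemplate.get() accepts either an integer id or a uuid
    # string and routes to the matching DB accessor, while anything else
    # raises InvalidIdentity; the neighbouring tests exercise each of
    # these paths in turn.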
@mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_get_by_uuid(self, mock_get_audit_template): mock_get_audit_template.return_value = self.fake_audit_template uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get( self.context, uuid, eager=self.eager) mock_get_audit_template.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'get_audit_template_by_name') def test_get_by_name(self, mock_get_audit_template): mock_get_audit_template.return_value = self.fake_audit_template name = self.fake_audit_template['name'] audit_template = objects.AuditTemplate.get_by_name( self.context, name, eager=self.eager) mock_get_audit_template.assert_called_once_with( self.context, name, eager=self.eager) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.AuditTemplate.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_audit_template_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_audit_template] audit_templates = objects.AuditTemplate.list( self.context, eager=self.eager) mock_get_list.assert_called_once_with( self.context, eager=self.eager, filters=None, limit=None, marker=None, sort_dir=None, sort_key=None) self.assertEqual(1, len(audit_templates)) self.assertIsInstance(audit_templates[0], objects.AuditTemplate) self.assertEqual(self.context, audit_templates[0]._context) for audit_template in audit_templates: self.eager_load_audit_template_assert( audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'update_audit_template') @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_save(self, mock_get_audit_template, mock_update_audit_template): mock_get_audit_template.return_value = self.fake_audit_template fake_saved_audit_template = self.fake_audit_template.copy() fake_saved_audit_template['updated_at'] = timeutils.utcnow() mock_update_audit_template.return_value = fake_saved_audit_template uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get_by_uuid( self.context, uuid, eager=self.eager) audit_template.goal_id = self.fake_goal.id audit_template.save() mock_get_audit_template.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_audit_template.assert_called_once_with( uuid, {'goal_id': self.fake_goal.id}) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_refresh(self, mock_get_audit_template): returns = [dict(self.fake_audit_template, name="first name"), dict(self.fake_audit_template, name="second name")] mock_get_audit_template.side_effect = returns uuid = self.fake_audit_template['uuid'] expected = [mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] audit_template = objects.AuditTemplate.get( self.context, uuid, eager=self.eager) self.assertEqual("first name", audit_template.name) audit_template.refresh(eager=self.eager) self.assertEqual("second name", audit_template.name) self.assertEqual(expected, mock_get_audit_template.call_args_list) 
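        # refresh() re-reads the record through the same DB accessor, so the
        # side_effect list above yields "first name" on the initial get and
        # "second name" after the refresh; both calls are recorded verbatim
        # in call_args_list, as just asserted.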
self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) class TestCreateDeleteAuditTemplateObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteAuditTemplateObject, self).setUp() self.fake_audit_template = utils.get_test_audit_template( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'create_audit_template') def test_create(self, mock_create_audit_template): goal = utils.create_test_goal() self.fake_audit_template['goal_id'] = goal.id mock_create_audit_template.return_value = self.fake_audit_template audit_template = objects.AuditTemplate( self.context, **self.fake_audit_template) audit_template.create() expected_audit_template = self.fake_audit_template.copy() expected_audit_template['created_at'] = expected_audit_template[ 'created_at'].replace(tzinfo=datetime.timezone.utc) mock_create_audit_template.assert_called_once_with( expected_audit_template) self.assertEqual(self.context, audit_template._context) @mock.patch.object(db_api.Connection, 'soft_delete_audit_template') @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_soft_delete(self, m_get_audit_template, m_soft_delete_audit_template): m_get_audit_template.return_value = self.fake_audit_template fake_deleted_audit_template = self.fake_audit_template.copy() fake_deleted_audit_template['deleted_at'] = timeutils.utcnow() m_soft_delete_audit_template.return_value = fake_deleted_audit_template expected_audit_template = fake_deleted_audit_template.copy() expected_audit_template['created_at'] = expected_audit_template[ 'created_at'].replace(tzinfo=datetime.timezone.utc) expected_audit_template['deleted_at'] = expected_audit_template[ 'deleted_at'].replace(tzinfo=datetime.timezone.utc) del expected_audit_template['goal'] del expected_audit_template['strategy'] uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) audit_template.soft_delete() m_get_audit_template.assert_called_once_with( self.context, uuid, eager=False) m_soft_delete_audit_template.assert_called_once_with(uuid) self.assertEqual(self.context, audit_template._context) self.assertEqual(expected_audit_template, audit_template.as_dict()) @mock.patch.object(db_api.Connection, 'destroy_audit_template') @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_destroy(self, mock_get_audit_template, mock_destroy_audit_template): mock_get_audit_template.return_value = self.fake_audit_template uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) audit_template.destroy() mock_get_audit_template.assert_called_once_with( self.context, uuid, eager=False) mock_destroy_audit_template.assert_called_once_with(uuid) self.assertEqual(self.context, audit_template._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_efficacy_indicator.py0000664000175000017500000001535000000000000026327 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from watcher.common import exception # from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestEfficacyIndicatorObject(base.DbTestCase): def setUp(self): super(TestEfficacyIndicatorObject, self).setUp() self.fake_efficacy_indicator = utils.get_test_efficacy_indicator() def test_get_by_id(self): efficacy_indicator_id = self.fake_efficacy_indicator['id'] with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_id', autospec=True) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) efficacy_indicator = objects.EfficacyIndicator.get( self.context, efficacy_indicator_id) mock_get_efficacy_indicator.assert_called_once_with( self.context, efficacy_indicator_id) self.assertEqual(self.context, efficacy_indicator._context) def test_get_by_uuid(self): uuid = self.fake_efficacy_indicator['uuid'] with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', autospec=True) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) efficacy_indicator = objects.EfficacyIndicator.get( self.context, uuid) mock_get_efficacy_indicator.assert_called_once_with( self.context, uuid) self.assertEqual(self.context, efficacy_indicator._context) def test_get_bad_id_and_uuid(self): self.assertRaises( exception.InvalidIdentity, objects.EfficacyIndicator.get, self.context, 'not-a-uuid') def test_list(self): with mock.patch.object(self.dbapi, 'get_efficacy_indicator_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_efficacy_indicator] efficacy_indicators = objects.EfficacyIndicator.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(efficacy_indicators)) self.assertIsInstance( efficacy_indicators[0], objects.EfficacyIndicator) self.assertEqual(self.context, efficacy_indicators[0]._context) def test_create(self): with mock.patch.object( self.dbapi, 'create_efficacy_indicator', autospec=True ) as mock_create_efficacy_indicator: mock_create_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) efficacy_indicator = objects.EfficacyIndicator( self.context, **self.fake_efficacy_indicator) efficacy_indicator.create() mock_create_efficacy_indicator.assert_called_once_with( self.fake_efficacy_indicator) self.assertEqual(self.context, efficacy_indicator._context) def test_destroy(self): uuid = self.fake_efficacy_indicator['uuid'] with mock.patch.object( self.dbapi, 'get_efficacy_indicator_by_uuid', autospec=True ) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) with mock.patch.object( self.dbapi, 'destroy_efficacy_indicator', autospec=True ) as mock_destroy_efficacy_indicator: efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( self.context, uuid) efficacy_indicator.destroy() mock_get_efficacy_indicator.assert_called_once_with( self.context, uuid) mock_destroy_efficacy_indicator.assert_called_once_with(uuid) self.assertEqual(self.context, 
efficacy_indicator._context) def test_save(self): uuid = self.fake_efficacy_indicator['uuid'] with mock.patch.object( self.dbapi, 'get_efficacy_indicator_by_uuid', autospec=True ) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) with mock.patch.object( self.dbapi, 'update_efficacy_indicator', autospec=True ) as mock_update_efficacy_indicator: efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( self.context, uuid) efficacy_indicator.description = 'Indicator Description' efficacy_indicator.save() mock_get_efficacy_indicator.assert_called_once_with( self.context, uuid) mock_update_efficacy_indicator.assert_called_once_with( uuid, {'description': 'Indicator Description'}) self.assertEqual(self.context, efficacy_indicator._context) def test_refresh(self): uuid = self.fake_efficacy_indicator['uuid'] returns = [dict(self.fake_efficacy_indicator, description="first description"), dict(self.fake_efficacy_indicator, description="second description")] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', side_effect=returns, autospec=True) as mock_get_efficacy_indicator: efficacy_indicator = objects.EfficacyIndicator.get( self.context, uuid) self.assertEqual( "first description", efficacy_indicator.description) efficacy_indicator.refresh() self.assertEqual( "second description", efficacy_indicator.description) self.assertEqual( expected, mock_get_efficacy_indicator.call_args_list) self.assertEqual(self.context, efficacy_indicator._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_goal.py0000664000175000017500000001407100000000000023443 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
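# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original module): every test
# below patches one method on db_api.Connection, drives the versioned object
# through its public API, and then asserts both the DB call and that the
# object kept the request context.  The same patch-and-assert pattern in
# miniature, using a hypothetical FakeConnection stand-in:

from unittest import mock


class FakeConnection(object):
    """Hypothetical stand-in for the DB connection used by these tests."""

    def get_goal_by_uuid(self, context, uuid):
        raise NotImplementedError  # always replaced by the patch below


with mock.patch.object(FakeConnection, 'get_goal_by_uuid') as m_get:
    m_get.return_value = {'uuid': 'fake-uuid', 'name': 'TEST'}
    conn = FakeConnection()
    goal = conn.get_goal_by_uuid('ctx', 'fake-uuid')
    m_get.assert_called_once_with('ctx', 'fake-uuid')
    assert goal['name'] == 'TEST'
# ---------------------------------------------------------------------------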
import datetime from unittest import mock from oslo_utils import timeutils from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestGoalObject(base.DbTestCase): def setUp(self): super(TestGoalObject, self).setUp() self.fake_goal = utils.get_test_goal( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'get_goal_by_id') def test_get_by_id(self, mock_get_goal): goal_id = self.fake_goal['id'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get(self.context, goal_id) mock_get_goal.assert_called_once_with(self.context, goal_id) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_get_by_uuid(self, mock_get_goal): uuid = self.fake_goal['uuid'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get(self.context, uuid) mock_get_goal.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_by_name') def test_get_by_name(self, mock_get_goal): name = self.fake_goal['name'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get_by_name(self.context, name) mock_get_goal.assert_called_once_with(self.context, name) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_goal] goals = objects.Goal.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(goals)) self.assertIsInstance(goals[0], objects.Goal) self.assertEqual(self.context, goals[0]._context) @mock.patch.object(db_api.Connection, 'create_goal') def test_create(self, mock_create_goal): mock_create_goal.return_value = self.fake_goal goal = objects.Goal(self.context, **self.fake_goal) goal.create() expected_goal = self.fake_goal.copy() expected_goal['created_at'] = expected_goal['created_at'].replace( tzinfo=datetime.timezone.utc) mock_create_goal.assert_called_once_with(expected_goal) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'destroy_goal') @mock.patch.object(db_api.Connection, 'get_goal_by_id') def test_destroy(self, mock_get_goal, mock_destroy_goal): goal_id = self.fake_goal['id'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get_by_id(self.context, goal_id) goal.destroy() mock_get_goal.assert_called_once_with( self.context, goal_id) mock_destroy_goal.assert_called_once_with(goal_id) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'update_goal') @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_save(self, mock_get_goal, mock_update_goal): mock_get_goal.return_value = self.fake_goal goal_uuid = self.fake_goal['uuid'] fake_saved_goal = self.fake_goal.copy() fake_saved_goal['updated_at'] = timeutils.utcnow() mock_update_goal.return_value = fake_saved_goal goal = objects.Goal.get_by_uuid(self.context, goal_uuid) goal.display_name = 'DUMMY' goal.save() mock_get_goal.assert_called_once_with(self.context, goal_uuid) mock_update_goal.assert_called_once_with( goal_uuid, {'display_name': 'DUMMY'}) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_refresh(self, mock_get_goal): fake_goal2 = utils.get_test_goal(name="BALANCE_LOAD") returns = [self.fake_goal, fake_goal2] mock_get_goal.side_effect = returns uuid = self.fake_goal['uuid'] 
expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] goal = objects.Goal.get(self.context, uuid) self.assertEqual("TEST", goal.name) goal.refresh() self.assertEqual("BALANCE_LOAD", goal.name) self.assertEqual(expected, mock_get_goal.call_args_list) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'soft_delete_goal') @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_soft_delete(self, mock_get_goal, mock_soft_delete_goal): mock_get_goal.return_value = self.fake_goal fake_deleted_goal = self.fake_goal.copy() fake_deleted_goal['deleted_at'] = timeutils.utcnow() mock_soft_delete_goal.return_value = fake_deleted_goal expected_goal = fake_deleted_goal.copy() expected_goal['created_at'] = expected_goal['created_at'].replace( tzinfo=datetime.timezone.utc) expected_goal['deleted_at'] = expected_goal['deleted_at'].replace( tzinfo=datetime.timezone.utc) uuid = self.fake_goal['uuid'] goal = objects.Goal.get_by_uuid(self.context, uuid) goal.soft_delete() mock_get_goal.assert_called_once_with(self.context, uuid) mock_soft_delete_goal.assert_called_once_with(uuid) self.assertEqual(self.context, goal._context) self.assertEqual(expected_goal, goal.as_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_objects.py0000664000175000017500000005152200000000000024154 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
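# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original module): the
# hydration/dehydration tests below round-trip objects through a primitive
# dict envelope keyed by 'watcher_object.name', 'watcher_object.namespace',
# 'watcher_object.version' and 'watcher_object.data'.  A simplified,
# dependency-free stand-in for that envelope (the real implementation lives
# in oslo.versionedobjects):

def to_primitive(name, version, data):
    # Build the serialized envelope the assertions below compare against.
    return {'watcher_object.name': name,
            'watcher_object.namespace': 'watcher',
            'watcher_object.version': version,
            'watcher_object.data': dict(data)}


def from_primitive(primitive):
    # Reject envelopes from a foreign namespace, mirroring
    # test_hydration_bad_ns.
    if primitive['watcher_object.namespace'] != 'watcher':
        raise ValueError('unsupported object namespace')
    return primitive['watcher_object.data']


assert from_primitive(to_primitive('MyObj', '1.5', {'foo': 1})) == {'foo': 1}
# ---------------------------------------------------------------------------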
import contextlib import datetime import gettext from unittest import mock from oslo_versionedobjects import base as object_base from oslo_versionedobjects import exception as object_exception from oslo_versionedobjects import fixture as object_fixture from watcher.common import context from watcher.objects import base from watcher.objects import fields from watcher.tests import base as test_base gettext.install('watcher') @base.WatcherObjectRegistry.register class MyObj(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): VERSION = '1.5' fields = {'foo': fields.IntegerField(), 'bar': fields.StringField(), 'missing': fields.StringField()} def obj_load_attr(self, attrname): setattr(self, attrname, 'loaded!') @object_base.remotable_classmethod def query(cls, context): obj = cls(context) obj.foo = 1 obj.bar = 'bar' obj.obj_reset_changes() return obj @object_base.remotable def marco(self, context=None): return 'polo' @object_base.remotable def update_test(self, context=None): if context and context.user == 'alternate': self.bar = 'alternate-context' else: self.bar = 'updated' @object_base.remotable def save(self, context=None): self.obj_reset_changes() @object_base.remotable def refresh(self, context=None): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @object_base.remotable def modify_save_modify(self, context=None): self.bar = 'meow' self.save() self.foo = 42 class MyObj2(object): @classmethod def obj_name(cls): return 'MyObj' @object_base.remotable_classmethod def get(cls, *args, **kwargs): pass @base.WatcherObjectRegistry.register_if(False) class WatcherTestSubclassedObject(MyObj): fields = {'new_field': fields.StringField()} class _LocalTest(test_base.TestCase): def setUp(self): super(_LocalTest, self).setUp() # Just in case base.WatcherObject.indirection_api = None @contextlib.contextmanager def things_temporarily_local(): # Temporarily go non-remote so the conductor handles # this request directly _api = base.WatcherObject.indirection_api base.WatcherObject.indirection_api = None yield base.WatcherObject.indirection_api = _api class _TestObject(object): def test_hydration_type_error(self): primitive = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 'a'}} self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) def test_hydration(self): primitive = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 1}} obj = MyObj.obj_from_primitive(primitive) self.assertEqual(1, obj.foo) def test_hydration_bad_ns(self): primitive = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'foo', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 1}} self.assertRaises(object_exception.UnsupportedObjectError, MyObj.obj_from_primitive, primitive) def test_dehydration(self): expected = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 1}} obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual(expected, obj.obj_to_primitive()) def test_get_updates(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def 
test_object_property(self): obj = MyObj(self.context, foo=1) self.assertEqual(1, obj.foo) def test_object_property_type_error(self): obj = MyObj(self.context) def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_load(self): obj = MyObj(self.context) self.assertEqual('loaded!', obj.bar) def test_load_in_base(self): @base.WatcherObjectRegistry.register_if(False) class Foo(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): fields = {'foobar': fields.IntegerField()} obj = Foo(self.context) self.assertRaisesRegex( NotImplementedError, "Cannot load 'foobar' in the base class", getattr, obj, 'foobar') def test_loaded_in_primitive(self): obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual('loaded!', obj.bar) expected = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.changes': ['bar'], 'watcher_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(expected, obj.obj_to_primitive()) def test_changes_in_primitive(self): obj = MyObj(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) primitive = obj.obj_to_primitive() self.assertIn('watcher_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(set(['foo']), obj2.obj_what_changed()) obj2.obj_reset_changes() self.assertEqual(set(), obj2.obj_what_changed()) def test_unknown_objtype(self): self.assertRaises(object_exception.UnsupportedObjectError, base.WatcherObject.obj_class_from_name, 'foo', '1.0') def test_with_alternate_context(self): ctxt1 = context.RequestContext('foo', 'foo') ctxt2 = context.RequestContext(user='alternate') obj = MyObj.query(ctxt1) obj.update_test(ctxt2) self.assertEqual('alternate-context', obj.bar) def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(object_exception.OrphanedObjectError, obj.update_test) def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.update_test(self.context) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.save() self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.refresh() self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(321, obj.foo) self.assertEqual('refreshed', obj.bar) def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(set(['bar']), obj.obj_what_changed()) obj.modify_save_modify(self.context) self.assertEqual(set(['foo']), obj.obj_what_changed()) self.assertEqual(42, obj.foo) self.assertEqual('meow', obj.bar) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual('bar', obj.bar) result = obj.marco() self.assertEqual('polo', result) def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(1, obj.foo) obj.update_test() self.assertEqual('updated', obj.bar) def test_base_attributes(self): dt = datetime.datetime(1955, 11, 5, 0, 0, tzinfo=datetime.timezone.utc) datatime = fields.DateTimeField() obj = MyObj(self.context) obj.created_at = dt obj.updated_at = dt expected = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 
'watcher', 'watcher_object.version': '1.5', 'watcher_object.changes': ['created_at', 'updated_at'], 'watcher_object.data': {'created_at': datatime.stringify(dt), 'updated_at': datatime.stringify(dt), } } actual = obj.obj_to_primitive() # watcher_object.changes is built from a set and order is undefined self.assertEqual(sorted(expected['watcher_object.changes']), sorted(actual['watcher_object.changes'])) del expected[ 'watcher_object.changes'], actual['watcher_object.changes'] self.assertEqual(expected, actual) def test_contains(self): obj = MyObj(self.context) self.assertNotIn('foo', obj) obj.foo = 1 self.assertIn('foo', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(self.context, foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def test_get(self): obj = MyObj(self.context, foo=1) # Foo has value, should not get the default self.assertEqual(obj.get('foo', 2), 1) # Foo has value, should return the value without error self.assertEqual(obj.get('foo'), 1) # Bar is not loaded, so we should get the default self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') # Bar without a default should lazy-load self.assertEqual(obj.get('bar'), 'loaded!') # Bar now has a default, but loaded value should be returned self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') # Invalid attribute should raise AttributeError self.assertRaises(AttributeError, obj.get, 'nothing') # ...even with a default self.assertRaises(AttributeError, obj.get, 'nothing', 3) def test_object_inheritance(self): base_fields = ( list(base.WatcherObject.fields) + list(base.WatcherPersistentObject.fields)) myobj_fields = ['foo', 'bar', 'missing'] + base_fields myobj3_fields = ['new_field'] self.assertTrue(issubclass(WatcherTestSubclassedObject, MyObj)) self.assertEqual(len(myobj_fields), len(MyObj.fields)) self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) self.assertEqual(len(myobj_fields) + len(myobj3_fields), len(WatcherTestSubclassedObject.fields)) self.assertEqual(set(myobj_fields) | set(myobj3_fields), set(WatcherTestSubclassedObject.fields.keys())) def test_get_changes(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): @base.WatcherObjectRegistry.register_if(False) class TestObj(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): fields = {'foo': fields.IntegerField()} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj(self.context) self.assertEqual(set(['created_at', 'updated_at', 'deleted_at', 'foo', 'bar']), set(obj.obj_fields)) def test_refresh_object(self): @base.WatcherObjectRegistry.register_if(False) class TestObj(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): fields = {'foo': fields.IntegerField(), 'bar': fields.StringField()} obj = TestObj(self.context) current_obj = TestObj(self.context) obj.foo = 10 obj.bar = 'obj.bar' current_obj.foo = 2 current_obj.bar = 'current.bar' obj.obj_refresh(current_obj) self.assertEqual(obj.foo, 2) self.assertEqual(obj.bar, 'current.bar') def test_obj_constructor(self): obj = MyObj(self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) 
        self.assertEqual('abc', obj.bar)
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())

    def test_assign_value_without_DictCompat(self):
        class TestObj(base.WatcherObject):
            fields = {'foo': fields.IntegerField(),
                      'bar': fields.StringField()}

        obj = TestObj(self.context)
        obj.foo = 10
        err_message = ''
        try:
            obj['bar'] = 'value'
        except TypeError as e:
            err_message = str(e)
        finally:
            self.assertIn("'TestObj' object does not support item assignment",
                          err_message)


class TestObject(_LocalTest, _TestObject):
    pass


# These hashes help developers check whether a change to an object needs a
# version bump. Each value is the md5 hash of the object's fields and
# remotable methods. The fingerprint values should only be changed if there
# is a version bump.
expected_object_fingerprints = {
    'Goal': '1.0-93881622db05e7b67a65ca885b4a022e',
    'Strategy': '1.1-73f164491bdd4c034f48083a51bdeb7b',
    'AuditTemplate': '1.1-b291973ffc5efa2c61b24fe34fdccc0b',
    'Audit': '1.7-19bc991c0b048263df021a36c8624f4d',
    'ActionPlan': '2.2-3331270cb3666c93408934826d03c08d',
    'Action': '2.0-1dd4959a7e7ac30c62ef170fe08dd935',
    'EfficacyIndicator': '1.0-655b71234a82bc7478aff964639c4bb0',
    'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576',
    'Service': '1.0-4b35b99ada9677a882c9de2b30212f35',
    'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3',
    'ActionDescription': '1.0-5761a3d16651046e7a0c357b57a6583e'
}


def get_watcher_objects():
    """Get Watcher versioned objects.

    This returns a dict of versioned objects which are in the Watcher
    project namespace only, i.e. it excludes objects from os-vif and other
    third-party modules.

    :return: a dict mapping class names to lists of versioned objects
    """
    all_classes = base.WatcherObjectRegistry.obj_classes()
    watcher_classes = {}
    for name in all_classes:
        objclasses = all_classes[name]
        if (objclasses[0].OBJ_PROJECT_NAMESPACE !=
                base.WatcherObject.OBJ_PROJECT_NAMESPACE):
            continue
        watcher_classes[name] = objclasses
    return watcher_classes


class TestObjectVersions(test_base.TestCase):

    def test_object_version_check(self):
        classes = base.WatcherObjectRegistry.obj_classes()
        checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
        # Compute the difference between the actual fingerprints and the
        # expected fingerprints; expect = actual = {} if there is no change.
        expect, actual = checker.test_hashes(expected_object_fingerprints)
        self.assertEqual(expect, actual,
                         "Some objects' fields or remotable methods have "
                         "been modified. Please make sure the version of "
                         "those objects has been bumped and then update "
                         "expected_object_fingerprints with the new hashes. 
") class TestObjectSerializer(test_base.TestCase): def test_object_serialization(self): obj_ser = base.WatcherObjectSerializer() obj = MyObj(self.context) primitive = obj_ser.serialize_entity(self.context, obj) self.assertIn('watcher_object.name', primitive) obj2 = obj_ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): obj_ser = base.WatcherObjectSerializer() obj = MyObj(self.context) for iterable in (list, tuple, set): thing = iterable([obj]) primitive = obj_ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertFalse(isinstance(item, base.WatcherObject)) thing2 = obj_ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) @mock.patch('watcher.objects.base.WatcherObject.indirection_api') def _test_deserialize_entity_newer(self, obj_version, backported_to, mock_indirection_api, my_version='1.6'): obj_ser = base.WatcherObjectSerializer() mock_indirection_api.object_backport_versions.return_value \ = 'backported' @base.WatcherObjectRegistry.register class MyTestObj(MyObj): VERSION = my_version obj = MyTestObj(self.context) obj.VERSION = obj_version primitive = obj.obj_to_primitive() result = obj_ser.deserialize_entity(self.context, primitive) if backported_to is None: self.assertFalse( mock_indirection_api.object_backport_versions.called) else: self.assertEqual('backported', result) versions = object_base.obj_tree_get_versions('MyTestObj') mock_indirection_api.object_backport_versions.assert_called_with( self.context, primitive, versions) def test_deserialize_entity_newer_version_backports(self): "Test object with unsupported (newer) version" self._test_deserialize_entity_newer('1.25', '1.6') def test_deserialize_entity_same_revision_does_not_backport(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6', None) def test_deserialize_entity_newer_revision_does_not_backport_zero(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6.0', None) def test_deserialize_entity_newer_revision_does_not_backport(self): "Test object with supported (newer) revision" self._test_deserialize_entity_newer('1.6.1', None) def test_deserialize_entity_newer_version_passes_revision(self): "Test object with unsupported (newer) version and revision" self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') class TestRegistry(test_base.TestCase): @mock.patch('watcher.objects.base.objects') def test_hook_chooses_newer_properly(self, mock_objects): mock_objects.MyObj.VERSION = MyObj.VERSION reg = base.WatcherObjectRegistry() reg.registration_hook(MyObj, 0) class MyNewerObj(object): VERSION = '1.123' @classmethod def obj_name(cls): return 'MyObj' self.assertEqual(MyObj, mock_objects.MyObj) reg.registration_hook(MyNewerObj, 0) self.assertEqual(MyNewerObj, mock_objects.MyObj) @mock.patch('watcher.objects.base.objects') def test_hook_keeps_newer_properly(self, mock_objects): mock_objects.MyObj.VERSION = MyObj.VERSION reg = base.WatcherObjectRegistry() reg.registration_hook(MyObj, 0) class MyOlderObj(object): VERSION = '1.1' @classmethod def obj_name(cls): return 'MyObj' self.assertEqual(MyObj, mock_objects.MyObj) reg.registration_hook(MyOlderObj, 0) self.assertEqual(MyObj, mock_objects.MyObj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_scoring_engine.py0000664000175000017500000001673100000000000025517 0ustar00zuulzuul00000000000000# Copyright 2016 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_utils import timeutils from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestScoringEngineObject(base.DbTestCase): def setUp(self): super(TestScoringEngineObject, self).setUp() self.fake_scoring_engine = utils.get_test_scoring_engine( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_get_by_id(self, mock_get_scoring_engine): scoring_engine_id = self.fake_scoring_engine['id'] mock_get_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine.get_by_id( self.context, scoring_engine_id) mock_get_scoring_engine.assert_called_once_with( self.context, scoring_engine_id) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') def test_get_by_uuid(self, mock_get_scoring_engine): se_uuid = self.fake_scoring_engine['uuid'] mock_get_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine.get_by_uuid( self.context, se_uuid) mock_get_scoring_engine.assert_called_once_with( self.context, se_uuid) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') def test_get_by_name(self, mock_get_scoring_engine): scoring_engine_uuid = self.fake_scoring_engine['uuid'] mock_get_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine.get( self.context, scoring_engine_uuid) mock_get_scoring_engine.assert_called_once_with( self.context, scoring_engine_uuid) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_scoring_engine] scoring_engines = objects.ScoringEngine.list(self.context) self.assertEqual(1, mock_get_list.call_count, 1) self.assertEqual(1, len(scoring_engines)) self.assertIsInstance(scoring_engines[0], objects.ScoringEngine) self.assertEqual(self.context, scoring_engines[0]._context) @mock.patch.object(db_api.Connection, 'create_scoring_engine') def test_create(self, mock_create_scoring_engine): mock_create_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine( self.context, **self.fake_scoring_engine) scoring_engine.create() expected_scoring_engine = self.fake_scoring_engine.copy() expected_scoring_engine['created_at'] = expected_scoring_engine[ 'created_at'].replace(tzinfo=datetime.timezone.utc) mock_create_scoring_engine.assert_called_once_with( expected_scoring_engine) self.assertEqual(self.context, scoring_engine._context) 
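    # NOTE(editor): comment added for clarity -- the destroy and soft-delete
    # tests below stack two patches: the *_by_id/_by_uuid lookup feeds
    # get_by_id()/get_by_uuid(), while the destroy/soft-delete mock captures
    # the terminal DB call.  Soft-delete assertions also normalize
    # created_at/deleted_at to timezone-aware UTC datetimes first, because
    # the object layer attaches tzinfo when loading from the DB.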
@mock.patch.object(db_api.Connection, 'destroy_scoring_engine') @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_destroy(self, mock_get_scoring_engine, mock_destroy_scoring_engine): mock_get_scoring_engine.return_value = self.fake_scoring_engine _id = self.fake_scoring_engine['id'] scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) scoring_engine.destroy() mock_get_scoring_engine.assert_called_once_with(self.context, _id) mock_destroy_scoring_engine.assert_called_once_with(_id) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'update_scoring_engine') @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') def test_save(self, mock_get_scoring_engine, mock_update_scoring_engine): mock_get_scoring_engine.return_value = self.fake_scoring_engine fake_saved_scoring_engine = self.fake_scoring_engine.copy() fake_saved_scoring_engine['updated_at'] = timeutils.utcnow() mock_update_scoring_engine.return_value = fake_saved_scoring_engine uuid = self.fake_scoring_engine['uuid'] scoring_engine = objects.ScoringEngine.get_by_uuid(self.context, uuid) scoring_engine.description = 'UPDATED DESCRIPTION' scoring_engine.save() mock_get_scoring_engine.assert_called_once_with(self.context, uuid) mock_update_scoring_engine.assert_called_once_with( uuid, {'description': 'UPDATED DESCRIPTION'}) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_refresh(self, mock_get_scoring_engine): returns = [ dict(self.fake_scoring_engine, description="first description"), dict(self.fake_scoring_engine, description="second description")] mock_get_scoring_engine.side_effect = returns _id = self.fake_scoring_engine['id'] expected = [mock.call(self.context, _id), mock.call(self.context, _id)] scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) self.assertEqual("first description", scoring_engine.description) scoring_engine.refresh() self.assertEqual("second description", scoring_engine.description) self.assertEqual(expected, mock_get_scoring_engine.call_args_list) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'soft_delete_scoring_engine') @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_soft_delete(self, mock_get_scoring_engine, mock_soft_delete): mock_get_scoring_engine.return_value = self.fake_scoring_engine fake_deleted_scoring_engine = self.fake_scoring_engine.copy() fake_deleted_scoring_engine['deleted_at'] = timeutils.utcnow() mock_soft_delete.return_value = fake_deleted_scoring_engine expected_scoring_engine = fake_deleted_scoring_engine.copy() expected_scoring_engine['created_at'] = expected_scoring_engine[ 'created_at'].replace(tzinfo=datetime.timezone.utc) expected_scoring_engine['deleted_at'] = expected_scoring_engine[ 'deleted_at'].replace(tzinfo=datetime.timezone.utc) _id = self.fake_scoring_engine['id'] scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) scoring_engine.soft_delete() mock_get_scoring_engine.assert_called_once_with(self.context, _id) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, scoring_engine._context) self.assertEqual(expected_scoring_engine, scoring_engine.as_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_service.py0000664000175000017500000001200400000000000024153 
0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_utils import timeutils from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestServiceObject(base.DbTestCase): def setUp(self): super(TestServiceObject, self).setUp() self.fake_service = utils.get_test_service( created_at=timeutils.utcnow()) @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_get_by_id(self, mock_get_service): service_id = self.fake_service['id'] mock_get_service.return_value = self.fake_service service = objects.Service.get(self.context, service_id) mock_get_service.assert_called_once_with(self.context, service_id) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'get_service_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_service] services = objects.Service.list(self.context) self.assertEqual(1, mock_get_list.call_count, 1) self.assertEqual(1, len(services)) self.assertIsInstance(services[0], objects.Service) self.assertEqual(self.context, services[0]._context) @mock.patch.object(db_api.Connection, 'create_service') def test_create(self, mock_create_service): mock_create_service.return_value = self.fake_service service = objects.Service(self.context, **self.fake_service) service.create() expected_service = self.fake_service.copy() expected_service['created_at'] = expected_service[ 'created_at'].replace(tzinfo=datetime.timezone.utc) mock_create_service.assert_called_once_with(expected_service) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'update_service') @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_save(self, mock_get_service, mock_update_service): mock_get_service.return_value = self.fake_service fake_saved_service = self.fake_service.copy() fake_saved_service['updated_at'] = timeutils.utcnow() mock_update_service.return_value = fake_saved_service _id = self.fake_service['id'] service = objects.Service.get(self.context, _id) service.name = 'UPDATED NAME' service.save() mock_get_service.assert_called_once_with(self.context, _id) mock_update_service.assert_called_once_with( _id, {'name': 'UPDATED NAME'}) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_refresh(self, mock_get_service): returns = [dict(self.fake_service, name="first name"), dict(self.fake_service, name="second name")] mock_get_service.side_effect = returns _id = self.fake_service['id'] expected = [mock.call(self.context, _id), mock.call(self.context, _id)] service = objects.Service.get(self.context, _id) self.assertEqual("first name", service.name) service.refresh() self.assertEqual("second name", service.name) self.assertEqual(expected, mock_get_service.call_args_list) self.assertEqual(self.context, service._context) 
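    # NOTE(editor): comment added for clarity -- test_refresh above relies on
    # the mock's side_effect returning two successive DB payloads: the first
    # get populates the object, and the second, triggered by refresh(), must
    # overwrite the stale field value.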
@mock.patch.object(db_api.Connection, 'soft_delete_service') @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_soft_delete(self, mock_get_service, mock_soft_delete): mock_get_service.return_value = self.fake_service fake_deleted_service = self.fake_service.copy() fake_deleted_service['deleted_at'] = timeutils.utcnow() mock_soft_delete.return_value = fake_deleted_service expected_service = fake_deleted_service.copy() expected_service['created_at'] = expected_service[ 'created_at'].replace(tzinfo=datetime.timezone.utc) expected_service['deleted_at'] = expected_service[ 'deleted_at'].replace(tzinfo=datetime.timezone.utc) _id = self.fake_service['id'] service = objects.Service.get(self.context, _id) service.soft_delete() mock_get_service.assert_called_once_with(self.context, _id) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, service._context) self.assertEqual(expected_service, service.as_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/test_strategy.py0000664000175000017500000001630400000000000024364 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
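# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original module): the
# TestStrategyObject class below declares a ``scenarios`` attribute, the
# testscenarios convention for running each test once per (name, attrs)
# pair.  A rough plain-unittest analogue of that expansion, using subTest:

import unittest


class ScenarioSketch(unittest.TestCase):
    scenarios = [('non_eager', {'eager': False}),
                 ('eager', {'eager': True})]

    def test_eager_flag(self):
        for name, attrs in self.scenarios:
            with self.subTest(name):
                # Each scenario's attributes parametrize the assertions,
                # much like self.eager does in TestStrategyObject.
                self.assertIsInstance(attrs['eager'], bool)
# ---------------------------------------------------------------------------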
from unittest import mock from watcher.common import exception from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestStrategyObject(base.DbTestCase): goal_id = 2 scenarios = [ ('non_eager', dict( eager=False, fake_strategy=utils.get_test_strategy( goal_id=goal_id))), ('eager_with_non_eager_load', dict( eager=True, fake_strategy=utils.get_test_strategy( goal_id=goal_id))), ('eager_with_eager_load', dict( eager=True, fake_strategy=utils.get_test_strategy( goal_id=goal_id, goal=utils.get_test_goal(id=goal_id)))), ] def setUp(self): super(TestStrategyObject, self).setUp() self.fake_goal = utils.create_test_goal(id=self.goal_id) def eager_load_strategy_assert(self, strategy): if self.eager: self.assertIsNotNone(strategy.goal) fields_to_check = set( super(objects.Goal, objects.Goal).fields ).symmetric_difference(objects.Goal.fields) db_data = { k: v for k, v in self.fake_goal.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in strategy.goal.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_get_by_id(self, mock_get_strategy): strategy_id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get( self.context, strategy_id, eager=self.eager) mock_get_strategy.assert_called_once_with( self.context, strategy_id, eager=self.eager) self.assertEqual(self.context, strategy._context) self.eager_load_strategy_assert(strategy) @mock.patch.object(db_api.Connection, 'get_strategy_by_uuid') def test_get_by_uuid(self, mock_get_strategy): uuid = self.fake_strategy['uuid'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get(self.context, uuid, eager=self.eager) mock_get_strategy.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, strategy._context) self.eager_load_strategy_assert(strategy) def test_get_bad_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Strategy.get, self.context, 'not-a-uuid') @mock.patch.object(db_api.Connection, 'get_strategy_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_strategy] strategies = objects.Strategy.list(self.context, eager=self.eager) self.assertEqual(1, mock_get_list.call_count, 1) self.assertEqual(1, len(strategies)) self.assertIsInstance(strategies[0], objects.Strategy) self.assertEqual(self.context, strategies[0]._context) for strategy in strategies: self.eager_load_strategy_assert(strategy) @mock.patch.object(db_api.Connection, 'update_strategy') @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_save(self, mock_get_strategy, mock_update_strategy): _id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get_by_id( self.context, _id, eager=self.eager) strategy.name = 'UPDATED NAME' strategy.save() mock_get_strategy.assert_called_once_with( self.context, _id, eager=self.eager) mock_update_strategy.assert_called_once_with( _id, {'name': 'UPDATED NAME'}) self.assertEqual(self.context, strategy._context) self.eager_load_strategy_assert(strategy) @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_refresh(self, mock_get_strategy): _id = self.fake_strategy['id'] returns = [dict(self.fake_strategy, name="first name"), dict(self.fake_strategy, name="second name")] mock_get_strategy.side_effect = 
returns expected = [mock.call(self.context, _id, eager=self.eager), mock.call(self.context, _id, eager=self.eager)] strategy = objects.Strategy.get(self.context, _id, eager=self.eager) self.assertEqual("first name", strategy.name) strategy.refresh(eager=self.eager) self.assertEqual("second name", strategy.name) self.assertEqual(expected, mock_get_strategy.call_args_list) self.assertEqual(self.context, strategy._context) self.eager_load_strategy_assert(strategy) class TestCreateDeleteStrategyObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteStrategyObject, self).setUp() self.fake_goal = utils.create_test_goal() self.fake_strategy = utils.get_test_strategy(goal_id=self.fake_goal.id) @mock.patch.object(db_api.Connection, 'create_strategy') def test_create(self, mock_create_strategy): mock_create_strategy.return_value = self.fake_strategy strategy = objects.Strategy(self.context, **self.fake_strategy) strategy.create() mock_create_strategy.assert_called_once_with(self.fake_strategy) self.assertEqual(self.context, strategy._context) @mock.patch.object(db_api.Connection, 'soft_delete_strategy') @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_soft_delete(self, mock_get_strategy, mock_soft_delete): _id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get_by_id(self.context, _id) strategy.soft_delete() mock_get_strategy.assert_called_once_with( self.context, _id, eager=False) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, strategy._context) @mock.patch.object(db_api.Connection, 'destroy_strategy') @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_destroy(self, mock_get_strategy, mock_destroy_strategy): _id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get_by_id(self.context, _id) strategy.destroy() mock_get_strategy.assert_called_once_with( self.context, _id, eager=False) mock_destroy_strategy.assert_called_once_with(_id) self.assertEqual(self.context, strategy._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/objects/utils.py0000664000175000017500000001757200000000000022633 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
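# ---------------------------------------------------------------------------
# Editor's sketch (hypothetical helper names, for illustration): every pair
# of helpers in the utilities module below follows the same split --
# get_test_X() builds an unsaved object from DB fixture data, while
# create_test_X() additionally commits it.  The shape of that pattern in
# miniature:

def _get_test_thing(cls, db_data, **kw):
    # Let the DB generate the id unless the caller pinned one explicitly.
    data = dict(db_data)
    if 'id' not in kw:
        data.pop('id', None)
    obj = cls()
    for key, value in data.items():
        setattr(obj, key, value)
    return obj


def _create_test_thing(cls, db_data, **kw):
    # Assumes cls exposes a create() method, as Watcher objects do.
    obj = _get_test_thing(cls, db_data, **kw)
    obj.create()
    return obj
# ---------------------------------------------------------------------------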
"""Watcher object test utilities.""" from watcher import objects from watcher.tests.db import utils as db_utils def _load_related_objects(context, cls, db_data): """Replace the DB data with its object counterpart""" obj_data = db_data.copy() for name, (obj_cls, _) in cls.object_fields.items(): if obj_data.get(name): obj_data[name] = obj_cls(context, **obj_data.get(name).as_dict()) else: del obj_data[name] return obj_data def _load_test_obj(context, cls, obj_data, **kw): # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del obj_data['id'] obj = cls(context) for key in obj_data: setattr(obj, key, obj_data[key]) return obj def get_test_audit_template(context, **kw): """Return a AuditTemplate object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.AuditTemplate db_data = db_utils.get_test_audit_template(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_audit_template(context, **kw): """Create and return a test audit_template object. Create a audit template in the DB and return an AuditTemplate object with appropriate attributes. """ audit_template = get_test_audit_template(context, **kw) audit_template.create() return audit_template def get_test_audit(context, **kw): """Return a Audit object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Audit db_data = db_utils.get_test_audit(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_audit(context, **kw): """Create and return a test audit object. Create a audit in the DB and return an Audit object with appropriate attributes. """ audit = get_test_audit(context, **kw) audit.create() return audit def get_test_action_plan(context, **kw): """Return a ActionPlan object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.ActionPlan db_data = db_utils.get_test_action_plan(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_action_plan(context, **kw): """Create and return a test action_plan object. Create a action plan in the DB and return a ActionPlan object with appropriate attributes. """ action_plan = get_test_action_plan(context, **kw) action_plan.create() return action_plan def get_test_action(context, **kw): """Return a Action object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Action db_data = db_utils.get_test_action(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_action(context, **kw): """Create and return a test action object. Create a action in the DB and return a Action object with appropriate attributes. """ action = get_test_action(context, **kw) action.create() return action def get_test_goal(context, **kw): """Return a Goal object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. 
""" obj_cls = objects.Goal db_data = db_utils.get_test_goal(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_goal(context, **kw): """Create and return a test goal object. Create a goal in the DB and return a Goal object with appropriate attributes. """ goal = get_test_goal(context, **kw) goal.create() return goal def get_test_scoring_engine(context, **kw): """Return a ScoringEngine object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.ScoringEngine db_data = db_utils.get_test_scoring_engine(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_scoring_engine(context, **kw): """Create and return a test scoring engine object. Create a scoring engine in the DB and return a ScoringEngine object with appropriate attributes. """ scoring_engine = get_test_scoring_engine(context, **kw) scoring_engine.create() return scoring_engine def get_test_service(context, **kw): """Return a Service object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Service db_data = db_utils.get_test_service(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_service(context, **kw): """Create and return a test service object. Create a service in the DB and return a Service object with appropriate attributes. """ service = get_test_service(context, **kw) service.create() return service def get_test_strategy(context, **kw): """Return a Strategy object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Strategy db_data = db_utils.get_test_strategy(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_strategy(context, **kw): """Create and return a test strategy object. Create a strategy in the DB and return a Strategy object with appropriate attributes. """ strategy = get_test_strategy(context, **kw) strategy.create() return strategy def get_test_efficacy_indicator(context, **kw): """Return a EfficacyIndicator object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.EfficacyIndicator db_data = db_utils.get_test_efficacy_indicator(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_efficacy_indicator(context, **kw): """Create and return a test efficacy indicator object. Create a efficacy indicator in the DB and return a EfficacyIndicator object with appropriate attributes. """ efficacy_indicator = get_test_efficacy_indicator(context, **kw) efficacy_indicator.create() return efficacy_indicator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/policy_fixture.py0000664000175000017500000000310700000000000023074 0ustar00zuulzuul00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from oslo_config import cfg from oslo_policy import _parser from oslo_policy import opts as policy_opts from watcher.common import policy as watcher_policy from watcher.tests import fake_policy CONF = cfg.CONF class PolicyFixture(fixtures.Fixture): def _setUp(self): self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file_name = os.path.join(self.policy_dir.path, 'policy.yaml') with open(self.policy_file_name, 'w') as policy_file: policy_file.write(fake_policy.policy_data) policy_opts.set_defaults(CONF) CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') watcher_policy._ENFORCER = None self.addCleanup(watcher_policy.init().clear) def set_rules(self, rules): policy = watcher_policy._ENFORCER policy.set_rules({k: _parser.parse_rule(v) for k, v in rules.items()}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/tests/test_threading.py0000664000175000017500000001327100000000000023036 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
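# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, assuming the futurist library is
# installed): the setUp below swaps the singleton's pool for
# futurist.SynchronousExecutor so every submit() runs inline and the
# assertions never race.  The substitution in isolation:

import futurist

pool = futurist.SynchronousExecutor()
future = pool.submit(lambda a, b: a + b, 1, 2)
# With the synchronous executor the future is already resolved here.
assert future.done()
assert future.result() == 3
# ---------------------------------------------------------------------------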
import futurist
from unittest import mock

from watcher.decision_engine import threading
from watcher.tests import base


class TestDecisionEngineThreadPool(base.TestCase):

    def setUp(self):
        super(TestDecisionEngineThreadPool, self).setUp()
        self.m_function = mock.Mock()
        self.m_function.return_value = None

        self.m_do_while_function = mock.Mock()
        self.m_do_while_function.return_value = None

        # Override the underlying threadpool for testing. This is like a
        # 'fixture' where the original state of the singleton is restored
        # after these tests finish, but the threadpool can still be used
        # as intended with its methods.
        self.p_threadpool = mock.patch.object(
            threading, 'DecisionEngineThreadPool',
            new=threading.DecisionEngineThreadPool)
        self.m_threadpool = self.p_threadpool.start()
        self.addCleanup(self.p_threadpool.stop)

        # Bind unbound patched methods for Python 2.7 compatibility;
        # class methods can be used unbound in Python 3.x.
        self.m_threadpool.submit = self.m_threadpool.submit.__get__(
            self.m_threadpool, threading.DecisionEngineThreadPool)

        # Perform all tests synchronously.
        self.m_threadpool._threadpool = futurist.SynchronousExecutor()

    def test_singleton(self):
        """Ensure only one DecisionEngineThreadPool object can be created"""
        threadpool1 = threading.DecisionEngineThreadPool()
        threadpool2 = threading.DecisionEngineThreadPool()
        self.assertEqual(threadpool1, threadpool2)

    def test_fixture_not_singleton(self):
        """Ensure the fixture does create a new instance of the singleton"""
        threadpool1 = threading.DecisionEngineThreadPool()
        threadpool2 = self.m_threadpool
        self.assertNotEqual(threadpool1, threadpool2)

    def test_do_while(self):
        """Test the regular operation of the threadpool and do_while_futures

        With the regular operation of do_while_futures, the collection of
        futures is shallow copied and left unmodified for the caller.
        """
        # create a collection of futures from submitted m_function tasks
        futures = [self.m_threadpool.submit(self.m_function, 1, 2)]
        self.m_function.assert_called_once_with(1, 2)

        # execute m_do_while_function for every future that completes
        # and block until all futures are completed
        self.m_threadpool.do_while_futures(
            futures, self.m_do_while_function, 3, 4)

        # assert that m_do_while_function was called
        self.m_do_while_function.assert_called_once_with(futures[0], 3, 4)

        # assert that the collection of futures is unmodified
        self.assertEqual(1, len(futures))

    def test_do_while_modify(self):
        """Test the operation of the threadpool and do_while_futures_modify

        The do_while_futures_modify function has slightly better performance
        because it will not create a copy of the collection and will
        modify it directly.
""" # create a collection of futures from submitted m_function tasks futures = [self.m_threadpool.submit(self.m_function, 1, 2)] self.m_function.assert_called_once_with(1, 2) # hold reference because element is going to be removed from the list future_ref = futures[0] # execute m_do_while_function for every future that completes # and block until all futures are completed self.m_threadpool.do_while_futures_modify( futures, self.m_do_while_function, 3, 4) # assert that m_do_while_function was called self.m_do_while_function.assert_called_once_with(future_ref, 3, 4) # assert that the collection of futures is modified self.assertEqual(0, len(futures)) def test_multiple_tasks(self): """Test that 10 tasks are all executed with the correct arguments""" # create a collection of 10 futures from submitted m_function tasks futures = [self.m_threadpool.submit( self.m_function, i, 2) for i in range(10)] # assert that there are 10 submitted tasks self.assertEqual(10, len(futures)) # execute m_do_while_function for every future that completes # and block until all futures are completed self.m_threadpool.do_while_futures( futures, self.m_do_while_function, 3, 4) # create list of 10 calls that should have occurred calls_submit = [] for i in range(10): calls_submit.append(mock.call(i, 2)) # test that the submit function has been called 10 times self.m_function.assert_has_calls( calls_submit, any_order=True) # create list of 10 calls that should have occurred calls_do_while = [] for i in range(10): calls_do_while.append(mock.call(futures[i], 3, 4)) # test that the passed do_while function has been called 10 times self.m_do_while_function.assert_has_calls( calls_do_while, any_order=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591576.0 python_watcher-14.0.0/watcher/version.py0000664000175000017500000000137000000000000020352 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('python-watcher') version_string = version_info.version_string()