pax_global_header 0000666 0000000 0000000 00000000064 15132464062 0014515 g ustar 00root root 0000000 0000000 52 comment=cd24dcb5caae151e668cd5ea562704c4c60b5f08
placement-14.0.0+git20260116.35.cd24dcb5/ 0000775 0000000 0000000 00000000000 15132464062 0016667 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/.coveragerc 0000664 0000000 0000000 00000000100 15132464062 0020777 0 ustar 00root root 0000000 0000000 [run]
branch = True
source = placement
omit = placement/tests/*
placement-14.0.0+git20260116.35.cd24dcb5/.gitignore 0000664 0000000 0000000 00000001214 15132464062 0020655 0 ustar 00root root 0000000 0000000 *.DS_Store
*.egg*
*.log
*.mo
*.pyc
*.swo
*.swp
*.sqlite
*~
.autogenerated
.coverage
.nova-venv
.project
.pydevproject
.ropeproject
.stestr/
.testrepository/
.tox
.idea
.venv
AUTHORS
Authors
build-stamp
tags
build/*
CA/
ChangeLog
coverage.xml
cover/*
covhtml/*
dist/*
doc/build/*
api-guide/build/*
api-ref/build/*
placement-api-ref/build/*
etc/placement/placement.conf.sample
etc/placement/policy.yaml.sample
etc/placement/policy.yaml.merged
instances
keeper
keys
local_settings.py
MANIFEST
nosetests.xml
doc/source/_static/placement.conf.sample
doc/source/_static/placement.policy.yaml.sample
# Files created by releasenotes build
releasenotes/build
placement-14.0.0+git20260116.35.cd24dcb5/.gitreview 0000664 0000000 0000000 00000000114 15132464062 0020671 0 ustar 00root root 0000000 0000000 [gerrit]
host=review.opendev.org
port=29418
project=openstack/placement.git
placement-14.0.0+git20260116.35.cd24dcb5/.pre-commit-config.yaml 0000664 0000000 0000000 00000002116 15132464062 0023150 0 ustar 00root root 0000000 0000000 ---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: trailing-whitespace
- id: mixed-line-ending
args: ['--fix', 'lf']
exclude: '.*\.(svg)$'
- id: fix-byte-order-marker
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: debug-statements
- id: check-json
files: .*\.json$
- id: check-yaml
files: .*\.(yaml|yml)$
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.5
hooks:
- id: remove-tabs
exclude: '.*\.(svg)$'
- repo: https://opendev.org/openstack/hacking
rev: 8.0.0
hooks:
- id: hacking
additional_dependencies: []
exclude: '^(doc|releasenotes|tools)/.*$'
- repo: https://github.com/hhatto/autopep8
rev: v2.3.2
hooks:
- id: autopep8
files: '^.*\.py$'
- repo: https://github.com/sphinx-contrib/sphinx-lint
rev: v1.0.1
hooks:
- id: sphinx-lint
args: [--enable=default-role]
files: ^doc/|releasenotes|api-guide
types: [rst]
placement-14.0.0+git20260116.35.cd24dcb5/.stestr.conf 0000664 0000000 0000000 00000001230 15132464062 0021134 0 ustar 00root root 0000000 0000000 [DEFAULT]
test_path=./placement/tests/unit
top_dir=./
# The group_regex describes how stestr will group tests into the same process
# when running concurrently. The following ensures that gabbi tests coming from
# the same YAML file are all in the same process. This is important because
# each YAML file represents an ordered sequence of HTTP requests. Note that
# tests which do not match this regex will not be grouped in any special way.
# See the following for more details.
# http://stestr.readthedocs.io/en/latest/MANUAL.html#grouping-tests
# https://gabbi.readthedocs.io/en/latest/#purpose
group_regex=placement\.tests\.functional\.test_api(?:\.|_)([^_]+)
placement-14.0.0+git20260116.35.cd24dcb5/.zuul.yaml 0000664 0000000 0000000 00000010150 15132464062 0020625 0 ustar 00root root 0000000 0000000 # Initial set of jobs that will be extended over time as
# we get things working.
- project:
templates:
# The integrated-gate-placement template adds the
# tempest-integrated-placement and grenade jobs.
# tempest-integrated-placement runs a subset of tempest tests which are
# relevant for placement, e.g. it does not run keystone tests.
- check-requirements
- integrated-gate-placement
- openstack-cover-jobs
- openstack-python3-jobs
- periodic-stable-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
jobs:
- openstack-tox-functional-py310
- openstack-tox-functional-py313
- openstack-tox-pep8
- placement-nova-tox-functional-py313
- placement-nested-perfload:
voting: false
- placement-perfload:
voting: false
- tempest-integrated-placement:
# Alias 'gate-irrelevant-files' define the set of irrelevant-files
# for which integrated testing jobs not required to run. If
# changes are only to those files then, zuul can skip the
# integrated testing job to save the infra resources.
# 'gate-irrelevant-files' should be used for integrated gate
# jobs only not for any other jobs like functional, unit, doc
# jobs.
irrelevant-files: &gate-irrelevant-files
- ^.*\.rst$
- ^api-ref/.*$
- ^.git.*$
- ^doc/.*$
- ^placement/tests/.*$
- ^\.pre-commit-config\.yaml$
- ^releasenotes/.*$
- ^tools/.*$
- ^tox.ini$
- grenade:
irrelevant-files: *gate-irrelevant-files
- grenade-skip-level:
irrelevant-files: *gate-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *gate-irrelevant-files
gate:
jobs:
- openstack-tox-functional-py310
- openstack-tox-functional-py313
- openstack-tox-pep8
- placement-nova-tox-functional-py313
- tempest-integrated-placement:
irrelevant-files: *gate-irrelevant-files
- grenade:
irrelevant-files: *gate-irrelevant-files
- grenade-skip-level:
irrelevant-files: *gate-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *gate-irrelevant-files
periodic-weekly:
jobs:
# update the python version when the support runtime for testing changes.
# we only test the latest version in the periodics as its just a signal
# that we need to investigate the health of the master branch in the absence
# of frequent patches.
- openstack-tox-functional-py313
- openstack-tox-py313
- placement-nova-tox-functional-py313
- tempest-integrated-placement
- job:
name: placement-nova-tox-functional-py313
parent: nova-tox-functional-py313
description: |
Run the nova functional tests to confirm that we aren't breaking
the PlacementFixture.
vars:
# 'functional-without-sample-db-tests' tox env is defined in nova tox.ini
# to skip the api|notification _sample_tests and db-related tests.
tox_envlist: functional-without-sample-db-tests
- job:
name: placement-perfload
parent: base
description: |
A simple node on which to run placement with the barest of configs and
make performance related tests against it.
required-projects:
- opendev.org/openstack/placement
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
- ^doc/.*$
- ^.git.*$
- ^placement/tests/.*$
- ^\.pre-commit-config\.yaml$
- ^releasenotes/.*$
- ^tox.ini$
run: playbooks/perfload.yaml
post-run: playbooks/post.yaml
- job:
name: placement-nested-perfload
parent: placement-perfload
description: |
A simple node on which to run placement with the barest of configs and
make nested performance related tests against it.
timeout: 3600
run: playbooks/nested-perfload.yaml
placement-14.0.0+git20260116.35.cd24dcb5/CONTRIBUTING.rst 0000664 0000000 0000000 00000001154 15132464062 0021331 0 ustar 00root root 0000000 0000000 The source repository for this project can be found at:
https://opendev.org/openstack/placement
Pull requests submitted through GitHub are not monitored.
To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:
https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
Bugs should be filed on launchpad:
https://bugs.launchpad.net/placement/+filebug
For more specific information about contributing to this repository, see the
placement contributor guide:
https://docs.openstack.org/placement/latest/contributor/contributing.html
placement-14.0.0+git20260116.35.cd24dcb5/LICENSE 0000664 0000000 0000000 00000023637 15132464062 0017707 0 ustar 00root root 0000000 0000000
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
placement-14.0.0+git20260116.35.cd24dcb5/README.rst 0000664 0000000 0000000 00000003420 15132464062 0020355 0 ustar 00root root 0000000 0000000 If you are viewing this README on GitHub, please be aware that placement
development happens on `OpenStack git
`_ and `OpenStack
gerrit `_.
===================
OpenStack Placement
===================
.. image:: https://governance.openstack.org/tc/badges/placement.svg
OpenStack Placement provides an HTTP service for managing, selecting,
and claiming providers of classes of inventory representing available
resources in a cloud.
API
---
To learn how to use Placement's API, consult the documentation available
online at:
- `Placement API Reference `__
For more information on OpenStack APIs, SDKs and CLIs in general, refer to:
- `OpenStack for App Developers `__
- `Development resources for OpenStack clouds
`__
Operators
---------
To learn how to deploy and configure OpenStack Placement, consult the
documentation available online at:
- `OpenStack Placement `__
In the unfortunate event that bugs are discovered, they should be reported to
the appropriate bug tracker. If you obtained the software from a 3rd party
operating system vendor, it is often wise to use their own bug tracker for
reporting problems. In all other cases use the master OpenStack bug tracker,
available at:
- `Bug Tracker `__
- `File new Bug `__
Developers
----------
For information on how to contribute to Placement, please see the contents of
CONTRIBUTING.rst.
Further developer focused documentation is available at:
- `Official Placement Documentation `__
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/ 0000775 0000000 0000000 00000000000 15132464062 0020212 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/ext/ 0000775 0000000 0000000 00000000000 15132464062 0021012 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/ext/__init__.py 0000664 0000000 0000000 00000000000 15132464062 0023111 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/ext/validator.py 0000664 0000000 0000000 00000004364 15132464062 0023360 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test to see if docs exists for routes and methods in the placement API."""
import os
from placement import handler
# A humane ordering of HTTP methods for sorted output.
ORDERED_METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']
DEPRECATED_METHODS = [('POST', '/resource_providers/{uuid}/inventories')]
def _header_line(map_entry):
method, route = map_entry
line = '.. rest_method:: %s %s' % (method, route)
return line
def inspect_doc(app):
"""Load up doc_files and see if any routes are missing.
The routes are defined in handler.ROUTE_DECLARATIONS.
"""
doc_files = [os.path.join(app.srcdir, file)
for file in os.listdir(app.srcdir) if file.endswith(".inc")]
routes = []
for route in sorted(handler.ROUTE_DECLARATIONS, key=len):
# Skip over the '' route.
if route:
for method in ORDERED_METHODS:
if method in handler.ROUTE_DECLARATIONS[route]:
routes.append((method, route))
header_lines = []
for map_entry in routes:
if map_entry not in DEPRECATED_METHODS:
header_lines.append(_header_line(map_entry))
content_lines = []
for doc_file in doc_files:
with open(doc_file) as doc_fh:
content_lines.extend(doc_fh.read().splitlines())
missing_lines = []
for line in header_lines:
if line not in content_lines:
missing_lines.append(line)
if missing_lines:
msg = ['Documentation likely missing for the following routes:', '']
for line in missing_lines:
msg.append(line)
raise ValueError('\n'.join(msg))
def setup(app):
app.connect('builder-inited', inspect_doc)
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/ 0000775 0000000 0000000 00000000000 15132464062 0021512 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/aggregates.inc 0000664 0000000 0000000 00000012243 15132464062 0024320 0 ustar 00root root 0000000 0000000 ============================
Resource provider aggregates
============================
Each resource provider can be associated with one or more other
resource providers in groups called aggregates. API calls in this
section are used to list and update the aggregates that are associated
with one resource provider.
Provider aggregates are used for modeling relationships among providers.
Examples may include:
* A shared storage pool providing DISK_GB resources to compute node providers
that provide VCPU and MEMORY_MB resources.
* Affinity/anti-affinity relationships such as physical location, power failure
domains, or other reliability/availability constructs.
* Groupings of compute host providers *corresponding to* Nova host aggregates
or availability zones.
.. note:: Placement aggregates are *not* the same as Nova host
aggregates and should not be considered equivalent.
The primary differences between Nova's host aggregates and placement
aggregates are the following:
* In Nova, a host aggregate associates a *nova-compute service* with
other nova-compute services. Placement aggregates are not specific
to a nova-compute service and are, in fact, not
compute-specific at all. A resource provider in the Placement API is
generic, and placement aggregates are simply groups of generic
resource providers. This is an important difference especially for
Ironic, which when used with Nova, has many Ironic baremetal nodes
attached to a single nova-compute service. In the Placement API,
each Ironic baremetal node is its own resource provider and can
therefore be associated to other Ironic baremetal nodes via a
placement aggregate association.
* In Nova, a host aggregate may have *metadata* key/value pairs
attached to it. All nova-compute services associated with a Nova
host aggregate share the same metadata. Placement aggregates have no
such metadata because placement aggregates *only* represent the
grouping of resource providers. In the Placement API, resource
providers are individually decorated with *traits* that provide
qualitative information about the resource provider.
* In Nova, a host aggregate dictates the *availability zone* within
which one or more nova-compute services reside. While placement aggregates
may be used to *model* availability zones, they have no inherent concept
thereof.
.. note:: Aggregates API requests are available starting from version 1.1.
List resource provider aggregates
=================================
.. rest_method:: GET /resource_providers/{uuid}/aggregates
Return a list of aggregates associated with the resource provider
identified by `{uuid}`.
Normal Response Codes: 200
Error response codes: itemNotFound(404) if the provider does not exist. (If the
provider has no aggregates, the result is 200 with an empty aggregate list.)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response (microversions 1.1 - 1.18)
-----------------------------------
.. rest_parameters:: parameters.yaml
- aggregates: aggregates
Response Example (microversions 1.1 - 1.18)
-------------------------------------------
.. literalinclude:: ./samples/aggregates/get-aggregates.json
:language: javascript
Response (microversions 1.19 - )
--------------------------------
.. rest_parameters:: parameters.yaml
- aggregates: aggregates
- resource_provider_generation: resource_provider_generation
Response Example (microversions 1.19 - )
----------------------------------------
.. literalinclude:: ./samples/aggregates/get-aggregates-1.19.json
:language: javascript
Update resource provider aggregates
===================================
Associate a list of aggregates with the resource provider identified by `{uuid}`.
.. rest_method:: PUT /resource_providers/{uuid}/aggregates
Normal Response Codes: 200
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
Request (microversion 1.1 - 1.18)
---------------------------------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- aggregates: aggregates
Request example (microversion 1.1 - 1.18)
-----------------------------------------
.. literalinclude:: ./samples/aggregates/update-aggregates-request.json
:language: javascript
Request (microversion 1.19 - )
---------------------------------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- aggregates: aggregates
- resource_provider_generation: resource_provider_generation
Request example (microversion 1.19 - )
-----------------------------------------
.. literalinclude:: ./samples/aggregates/update-aggregates-request-1.19.json
:language: javascript
Response (microversion 1.1 - )
------------------------------
.. rest_parameters:: parameters.yaml
- aggregates: aggregates
- resource_provider_generation: resource_provider_generation_v1_19
Response Example (microversion 1.1 - 1.18)
------------------------------------------
.. literalinclude:: ./samples/aggregates/update-aggregates.json
:language: javascript
Response Example (microversion 1.19 - )
------------------------------------------
.. literalinclude:: ./samples/aggregates/update-aggregates-1.19.json
:language: javascript
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/allocation_candidates.inc 0000664 0000000 0000000 00000007165 15132464062 0026522 0 ustar 00root root 0000000 0000000 =====================
Allocation candidates
=====================
.. note:: Allocation candidates API requests are available starting from version 1.10.
List allocation candidates
==========================
Returns a dictionary representing a collection of allocation requests
and resource provider summaries. Each allocation request has
information to form a ``PUT /allocations/{consumer_uuid}`` request to claim
resources against a related set of resource providers. Additional parameters
might be required, see `Update allocations`_. As several allocation
requests are available it's necessary to select one. To make a
decision, resource provider summaries are provided with the
inventory/capacity information. For example, this information is used by
nova-scheduler's FilterScheduler to make decisions about on which compute host
to build a server.
You can also find additional case studies of the request parameters in the
`Modeling with Provider Trees`_ document.
.. rest_method:: GET /allocation_candidates
Normal Response Codes: 200
Error response codes: badRequest(400)
Request
-------
.. rest_parameters:: parameters.yaml
- resources: resources_query_ac
- required: required_traits_unnumbered
- member_of: allocation_candidates_member_of
- in_tree: allocation_candidates_in_tree
- resourcesN: resources_query_granular
- requiredN: required_traits_granular
- member_ofN: allocation_candidates_member_of_granular
- in_treeN: allocation_candidates_in_tree_granular
- group_policy: allocation_candidates_group_policy
- limit: allocation_candidates_limit
- root_required: allocation_candidates_root_required
- same_subtree: allocation_candidates_same_subtree
Response (microversions 1.12 - )
--------------------------------
.. rest_parameters:: parameters.yaml
- allocation_requests: allocation_requests
- provider_summaries: provider_summaries_1_12
- allocations: allocations_by_resource_provider
- resources: resources
- capacity: capacity
- used: used
- traits: traits_1_17
- parent_provider_uuid: resource_provider_parent_provider_uuid_response_1_29
- root_provider_uuid: resource_provider_root_provider_uuid_1_29
- mappings: mappings
Response Example (microversions 1.34 - )
----------------------------------------
.. literalinclude:: ./samples/allocation_candidates/get-allocation_candidates-1.34.json
:language: javascript
Response Example (microversions 1.29 - 1.33)
--------------------------------------------
.. literalinclude:: ./samples/allocation_candidates/get-allocation_candidates-1.29.json
:language: javascript
Response Example (microversions 1.17 - 1.28)
--------------------------------------------
.. literalinclude:: ./samples/allocation_candidates/get-allocation_candidates-1.17.json
:language: javascript
Response Example (microversions 1.12 - 1.16)
--------------------------------------------
.. literalinclude:: ./samples/allocation_candidates/get-allocation_candidates-1.12.json
:language: javascript
Response (microversions 1.10 - 1.11)
------------------------------------
.. rest_parameters:: parameters.yaml
- allocation_requests: allocation_requests
- provider_summaries: provider_summaries
- allocations: allocations_array
- resource_provider: resource_provider_object
- uuid: resource_provider_uuid
- resources: resources
- capacity: capacity
- used: used
Response Example (microversions 1.10 - 1.11)
--------------------------------------------
.. literalinclude:: ./samples/allocation_candidates/get-allocation_candidates.json
:language: javascript
.. _`Modeling with Provider Trees`: https://docs.openstack.org/placement/latest/usage/provider-tree.html
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/allocations.inc 0000664 0000000 0000000 00000015347 15132464062 0024527 0 ustar 00root root 0000000 0000000 ===========
Allocations
===========
Allocations are records representing resources that have been assigned
and used by some consumer of that resource. They indicate the amount
of a particular resource that has been allocated to a given consumer
of that resource from a particular resource provider.
Manage allocations
==================
Create, update or delete allocations for multiple consumers in a single
request. This allows a client to atomically set or swap allocations for
multiple consumers as may be required during a migration or move type
operation.
The allocations for an individual consumer uuid mentioned in the request
can be removed by setting the `allocations` to an empty object (see the
example below).
**Available as of microversion 1.13.**
.. rest_method:: POST /allocations
Normal response codes: 204
Error response codes: badRequest(400), conflict(409)
* `409 Conflict` if there is no available inventory in any of the
resource providers for any specified resource classes.
* `409 Conflict` with `error code `_
``placement.concurrent_update`` if inventories are updated by another request
while attempting the operation. See :ref:`generations`.
* `409 Conflict` with `error code `_
``placement.concurrent_update`` at microversion 1.28 or higher if allocations
for a specified consumer have been created, updated, or removed by another
request while attempting the operation. See :ref:`generations`.
Request
-------
.. rest_parameters:: parameters.yaml
- consumer_uuid: consumer_uuid_body
- consumer_generation: consumer_generation_min
- consumer_type: consumer_type
- project_id: project_id_body
- user_id: user_id_body
- allocations: allocations_dict_empty
- generation: resource_provider_generation_optional
- resources: resources
- mappings: mappings_in_allocations
Request example (microversions 1.38 - )
---------------------------------------
.. literalinclude:: ./samples/allocations/manage-allocations-request-1.38.json
:language: javascript
Request example (microversions 1.28 - 1.36)
-------------------------------------------
.. literalinclude:: ./samples/allocations/manage-allocations-request-1.28.json
:language: javascript
Request example (microversions 1.13 - 1.27)
-------------------------------------------
.. literalinclude:: ./samples/allocations/manage-allocations-request.json
:language: javascript
Response
--------
No body content is returned after a successful request
List allocations
================
List all allocation records for the consumer identified by
`{consumer_uuid}` on all the resource providers it is consuming.
.. note:: When listing allocations for a consumer uuid that has no
allocations a dict with an empty value is returned
``{"allocations": {}}``.
.. rest_method:: GET /allocations/{consumer_uuid}
Normal Response Codes: 200
Request
-------
.. rest_parameters:: parameters.yaml
- consumer_uuid: consumer_uuid
Response
--------
.. rest_parameters:: parameters.yaml
- allocations: allocations_by_resource_provider
- generation: resource_provider_generation
- resources: resources
- consumer_generation: consumer_generation_get
- consumer_type: consumer_type
- project_id: project_id_body_1_12
- user_id: user_id_body_1_12
Response Example (1.38 - )
--------------------------
.. literalinclude:: ./samples/allocations/get-allocations-1.38.json
:language: javascript
Response Example (1.28 - 1.36)
------------------------------
.. literalinclude:: ./samples/allocations/get-allocations-1.28.json
:language: javascript
Response Example (1.12 - 1.27)
------------------------------
.. literalinclude:: ./samples/allocations/get-allocations.json
:language: javascript
Update allocations
==================
Create or update one or more allocation records representing the consumption of
one or more classes of resources from one or more resource providers by
the consumer identified by `{consumer_uuid}`.
If allocations already exist for this consumer, they are replaced.
.. rest_method:: PUT /allocations/{consumer_uuid}
Normal Response Codes: 204
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
* `409 Conflict` if there is no available inventory in any of the
resource providers for any specified resource classes.
* `409 Conflict` with `error code `_
``placement.concurrent_update`` if inventories are updated by another request
while attempting the operation. See :ref:`generations`.
* `409 Conflict` with `error code `_
``placement.concurrent_update`` at microversion 1.28 or higher if allocations
for the specified consumer have been created, updated, or removed by another
request while attempting the operation. See :ref:`generations`.
Request (microversions 1.12 - )
-------------------------------
.. rest_parameters:: parameters.yaml
- consumer_uuid: consumer_uuid
- allocations: allocations_dict
- resources: resources
- consumer_generation: consumer_generation_min
- consumer_type: consumer_type
- project_id: project_id_body
- user_id: user_id_body
- generation: resource_provider_generation_optional
- mappings: mappings_in_allocations
Request example (microversions 1.38 - )
---------------------------------------
.. literalinclude:: ./samples/allocations/update-allocations-request-1.38.json
:language: javascript
Request example (microversions 1.28 - 1.36)
-------------------------------------------
.. literalinclude:: ./samples/allocations/update-allocations-request-1.28.json
:language: javascript
Request example (microversions 1.12 - 1.27)
-------------------------------------------
.. literalinclude:: ./samples/allocations/update-allocations-request-1.12.json
:language: javascript
Request (microversions 1.0 - 1.11)
----------------------------------
.. rest_parameters:: parameters.yaml
- consumer_uuid: consumer_uuid
- allocations: allocations_array
- resources: resources
- resource_provider: resource_provider_object
- uuid: resource_provider_uuid
- project_id: project_id_body_1_8
- user_id: user_id_body_1_8
Request example (microversions 1.0 - 1.11)
------------------------------------------
.. literalinclude:: ./samples/allocations/update-allocations-request.json
:language: javascript
Response
--------
No body content is returned on a successful PUT.
Delete allocations
==================
Delete all allocation records for the consumer identified by
`{consumer_uuid}` on all resource providers it is consuming.
.. rest_method:: DELETE /allocations/{consumer_uuid}
Normal Response Codes: 204
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- consumer_uuid: consumer_uuid
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/conf.py 0000664 0000000 0000000 00000005267 15132464062 0023023 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# placement-api-ref documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
extensions = [
'openstackdocstheme',
'os_api_ref',
'ext.validator',
]
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Placement API Reference'
copyright = '2010-present, OpenStack Foundation'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/placement'
openstackdocs_auto_name = False
openstackdocs_use_storyboard = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"sidebar_mode": "toc",
}
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Placement.tex', 'OpenStack Placement API Documentation',
'OpenStack Foundation', 'manual'),
]
# -- Options for openstackdocstheme -------------------------------------------
openstackdocs_projects = [
'placement',
]
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/errors.inc 0000664 0000000 0000000 00000006262 15132464062 0023527 0 ustar 00root root 0000000 0000000 ======
Errors
======
When there is an error interacting with the placement API, the response will
include a few different signals of what went wrong, include the status header
and information in the response body. The structure of the ``JSON`` body of an
error response is defined by the OpenStack errors_ guideline.
**HTTP Status Code**
The ``Status`` header of the response will include a code, defined by
:rfc:`7231#section-6` that gives a general overview of the problem.
This value also shows up in a ``status`` attribute in the body of the
response.
**Detail Message**
A textual description of the error condition, in a ``detail`` attribute.
The value is usually the message associated with whatever exception
happened within the service.
**Error Code**
When the microversion is ``>=1.23`` responses will also include a ``code``
attribute in the ``JSON`` body. These are documented below. Where a
response does not use a specific code ``placement.undefined_code`` is
present.
.. note:: In some cases, for example when keystone is being used and no
authentication information is provided in a request (causing a
``401`` response), the structure of the error response will not match
the above because the error is produced by code other than the
placement service.
.. _`error_codes`:
Error Codes
===========
The defined errors are:
.. list-table::
:header-rows: 1
* - Code
- Meaning
* - ``placement.undefined_code``
- The default code used when a specific code has not been defined or is
not required.
* - ``placement.inventory.inuse``
- An attempt has been made to remove or shrink inventory that has capacity
in use.
* - ``placement.concurrent_update``
- Another operation has concurrently made a request that involves one or
more of the same resources referenced in this request, changing state.
The current state should be retrieved to determine if the desired
operation should be retried.
* - ``placement.duplicate_name``
- A resource of this type already exists with the same name, and duplicate
names are not allowed.
* - ``placement.resource_provider.inuse``
- An attempt was made to remove a resource provider, but there are
allocations against its inventory.
* - ``placement.resource_provider.cannot_delete_parent``
- An attempt was made to remove a resource provider, but it has one or
more child providers. They must be removed first in order to remove this
provider.
* - ``placement.resource_provider.not_found``
- A resource provider mentioned in an operation involving multiple
resource providers, such as :ref:`reshaper`, does not exist.
* - ``placement.query.duplicate_key``
- A request included multiple instances of a query parameter that may only
be specified once.
* - ``placement.query.bad_value``
- A value in a request conformed to the schema, but failed semantic
validation.
* - ``placement.query.missing_value``
- A required query parameter is not present in a request.
.. _errors: https://specs.openstack.org/openstack/api-wg/guidelines/errors.html
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/generations.inc 0000664 0000000 0000000 00000004117 15132464062 0024526 0 ustar 00root root 0000000 0000000 .. _generations:
==========================================
Resource Provider and Consumer Generations
==========================================
Placement handles concurrent requests against the same entity by maintaining a
**generation** for resource providers and consumers. The generation is an
opaque value that is updated every time its entity is successfully changed on
the server.
At appropriate microversions, the generation is returned in responses involving
resource providers and/or consumers (allocations), and must be included in
requests which make changes to those entities. The server checks to make sure
the generation specified in the request matches the internal value. A mismatch
indicates that a different request successfully updated that entity in the
interim, thereby changing its generation. This will result in an HTTP 409
Conflict response with `error code `_
``placement.concurrent_update``.
Depending on the usage scenario, an appropriate reaction to such an error may
be to re-``GET`` the entity in question, re-evaluate and update as appropriate,
and resubmit the request with the new payload.
The following pseudocode is a simplistic example of how one might ensure that a
trait is set on a resource provider.
.. note:: This is not production code. Aside from not being valid syntax for
any particular programming language, it deliberately glosses over
details and good programming practices such as error checking, retry
limits, etc. It is purely for illustrative purposes.
::
function _is_concurrent_update(resp) {
if(resp.status_code != 409) return False
return(resp.json()["errors"][0]["code"] == "placement.concurrent_update")
}
function ensure_trait_on_provider(provider_uuid, trait) {
do {
path = "/resource_providers/" + provider_uuid + "/traits"
get_resp = placement.GET(path)
payload = get_resp.json()
if(trait in payload["traits"]) return
payload["traits"].append(trait)
put_resp = placement.PUT(path, payload)
} while _is_concurrent_update(put_resp)
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/index.rst 0000664 0000000 0000000 00000002067 15132464062 0023360 0 ustar 00root root 0000000 0000000 :tocdepth: 2
===============
Placement API
===============
This is a reference for the OpenStack Placement API. To learn more about
OpenStack Placement API concepts, please refer to the
:placement-doc:`Placement Introduction <>`.
The Placement API uses JSON for data exchange. As such, the ``Content-Type``
header for APIs sending data payloads in the request body (i.e. ``PUT`` and
``POST``) must be set to ``application/json`` unless otherwise noted.
.. rest_expand_all::
.. include:: request-ids.inc
.. include:: errors.inc
.. include:: generations.inc
.. include:: root.inc
.. include:: resource_providers.inc
.. include:: resource_provider.inc
.. include:: resource_classes.inc
.. include:: resource_class.inc
.. include:: inventories.inc
.. include:: inventory.inc
.. include:: aggregates.inc
.. include:: traits.inc
.. include:: resource_provider_traits.inc
.. include:: allocations.inc
.. include:: resource_provider_allocations.inc
.. include:: usages.inc
.. include:: resource_provider_usages.inc
.. include:: allocation_candidates.inc
.. include:: reshaper.inc
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/inventories.inc 0000664 0000000 0000000 00000006430 15132464062 0024555 0 ustar 00root root 0000000 0000000 =============================
Resource provider inventories
=============================
Each resource provider has inventory records for one or more classes
of resources. An inventory record contains information about the total
and reserved amounts of the resource and any consumption constraints
for that resource against the provider.
List resource provider inventories
==================================
.. rest_method:: GET /resource_providers/{uuid}/inventories
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
.. rest_parameters:: parameters.yaml
- inventories: inventories
- resource_provider_generation: resource_provider_generation
- allocation_ratio: allocation_ratio
- max_unit: max_unit
- min_unit: min_unit
- reserved: reserved
- step_size: step_size
- total: total
Response Example
----------------
.. literalinclude:: ./samples/inventories/get-inventories.json
:language: javascript
Update resource provider inventories
====================================
Replaces the set of inventory records for the resource provider identified by `{uuid}`.
.. rest_method:: PUT /resource_providers/{uuid}/inventories
Normal Response Codes: 200
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- resource_provider_generation: resource_provider_generation
- inventories: inventories
- total: total
- allocation_ratio: allocation_ratio_opt
- max_unit: max_unit_opt
- min_unit: min_unit_opt
- reserved: reserved_opt
- step_size: step_size_opt
Request example
---------------
.. literalinclude:: ./samples/inventories/update-inventories-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- resource_provider_generation: resource_provider_generation
- inventories: inventories
- allocation_ratio: allocation_ratio
- max_unit: max_unit
- min_unit: min_unit
- reserved: reserved
- step_size: step_size
- total: total
Response Example
----------------
.. literalinclude:: ./samples/inventories/update-inventories.json
:language: javascript
Delete resource provider inventories
====================================
Deletes all inventory records for the resource provider identified by `{uuid}`.
**Troubleshooting**
The request returns an HTTP 409 when there are allocations against
the provider or if the provider's inventory is updated by another
thread while attempting the operation.
.. note:: Method is available starting from version 1.5.
.. rest_method:: DELETE /resource_providers/{uuid}/inventories
Normal Response Codes: 204
Error response codes: itemNotFound(404), conflict(409)
.. note:: Since this request does not accept the resource provider generation,
it is not safe to use when multiple threads are managing inventories
for a single provider. In such situations, use the
``PUT /resource_providers/{uuid}/inventories`` API with an empty
``inventories`` dict.
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/inventory.inc 0000664 0000000 0000000 00000006146 15132464062 0024251 0 ustar 00root root 0000000 0000000 ===========================
Resource provider inventory
===========================
See `Resource provider inventories`_ for a description.
This group of API calls works with a single inventory identified by ``resource_class``.
One inventory can be listed, created, updated and deleted per each call.
Show resource provider inventory
================================
.. rest_method:: GET /resource_providers/{uuid}/inventories/{resource_class}
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- resource_class: resource_class_path
Response
--------
.. rest_parameters:: parameters.yaml
- resource_provider_generation: resource_provider_generation
- allocation_ratio: allocation_ratio
- max_unit: max_unit
- min_unit: min_unit
- reserved: reserved
- step_size: step_size
- total: total
Response Example
----------------
.. literalinclude:: ./samples/inventories/get-inventory.json
:language: javascript
Update resource provider inventory
==================================
Replace the inventory record of the `{resource_class}` for the resource
provider identified by `{uuid}`.
.. rest_method:: PUT /resource_providers/{uuid}/inventories/{resource_class}
Normal Response Codes: 200
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- resource_class: resource_class_path
- resource_provider_generation: resource_provider_generation
- total: total
- allocation_ratio: allocation_ratio_opt
- max_unit: max_unit_opt
- min_unit: min_unit_opt
- reserved: reserved_opt
- step_size: step_size_opt
Request example
---------------
.. literalinclude:: ./samples/inventories/update-inventory-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- resource_provider_generation: resource_provider_generation
- allocation_ratio: allocation_ratio
- max_unit: max_unit
- min_unit: min_unit
- reserved: reserved
- step_size: step_size
- total: total
Response Example
----------------
.. literalinclude:: ./samples/inventories/update-inventory.json
:language: javascript
Delete resource provider inventory
==================================
Delete the inventory record of the `{resource_class}` for
the resource provider identified by `{uuid}`.
See `Troubleshooting`_ section in ``Delete resource provider
inventories`` for a description. In addition, the request returns
HTTP 409 when there are allocations for the specified resource
provider and resource class.
.. _Troubleshooting: ?expanded=delete-resource-provider-inventories-detail#delete-resource-provider-inventories
.. rest_method:: DELETE /resource_providers/{uuid}/inventories/{resource_class}
Normal Response Codes: 204
Error response codes: itemNotFound(404), conflict(409)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- resource_class: resource_class_path
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/parameters.yaml 0000664 0000000 0000000 00000075206 15132464062 0024553 0 ustar 00root root 0000000 0000000 # variables in header
location:
description: |
The location URL of the resource created,
HTTP header "Location: " will be returned.
in: header
required: true
type: string
# variables in path
consumer_uuid: &consumer_uuid
type: string
in: path
required: true
description: >
The uuid of a consumer.
resource_class_path: &resource_class_path
type: string
in: path
required: true
description: >
The name of one resource class.
resource_class_path_custom: &resource_class_path_custom
type: string
in: path
required: true
description: >
The name of one resource class. The name must start with
the prefix ``CUSTOM_``. If not, the request returns a ``Bad Request (400)``
response code.
resource_provider_uuid_path: &resource_provider_uuid_path
type: string
in: path
required: true
description: >
The uuid of a resource provider.
trait_name:
type: string
in: path
required: true
description: >
The name of a trait.
# variables in query
allocation_candidates_group_policy:
type: string
in: query
required: false
min_version: 1.25
description: >
When more than one ``resourcesN`` query parameter is supplied,
``group_policy`` is required to indicate how the groups should interact.
With ``group_policy=none``, separate groupings - with or without a suffix -
may or may not be satisfied by the same provider. With
``group_policy=isolate``, suffixed groups are guaranteed to be satisfied by
*different* providers - though there may still be overlap with the
suffixless group.
allocation_candidates_in_tree: &allocation_candidates_in_tree
type: string
in: query
required: false
description: >
A string representing a resource provider uuid. When supplied, it will
filter the returned allocation candidates to only those resource providers
that are in the same tree with the given resource provider.
min_version: 1.31
allocation_candidates_in_tree_granular:
<<: *allocation_candidates_in_tree
description: >
A string representing a resource provider uuid. The parameter key is
``in_treeN``, where ``N`` represents a suffix corresponding with a
``resourcesN`` parameter. When supplied, it will filter the returned
allocation candidates for that suffixed group to only those resource
providers that are in the same tree with the given resource provider.
**In microversions 1.25 - 1.32** the suffix is a number.
**Starting from microversion 1.33** the suffix is a string that may be 1-64
characters long and consist of numbers, ``a-z``, ``A-Z``, ``-``, and ``_``.
allocation_candidates_limit:
type: integer
in: query
required: false
min_version: 1.16
description: >
A positive integer used to limit the maximum number of allocation
candidates returned in the response.
allocation_candidates_member_of:
type: string
in: query
required: false
description: >
A string representing an aggregate uuid; or the prefix ``in:`` followed by
a comma-separated list of strings representing aggregate uuids. The
resource providers in the allocation request in the response must directly
or via the root provider be associated with the aggregate or aggregates
identified by uuid::
member_of=5e08ea53-c4c6-448e-9334-ac4953de3cfa
member_of=in:42896e0d-205d-4fe3-bd1e-100924931787,5e08ea53-c4c6-448e-9334-ac4953de3cfa
**Starting from microversion 1.24** specifying multiple ``member_of`` query
string parameters is possible. Multiple ``member_of`` parameters will
result in filtering providers that are directly or via root provider
associated with aggregates listed in all of the ``member_of`` query string
values. For example, to get the providers that are associated with
aggregate A as well as associated with any of aggregates B or C, the user
could issue the following query::
member_of=AGGA_UUID&member_of=in:AGGB_UUID,AGGC_UUID
**Starting from microversion 1.32** specifying forbidden aggregates is
supported in the ``member_of`` query string parameter. Forbidden aggregates
are prefixed with a ``!``. This negative expression can also be used in
multiple ``member_of`` parameters::
member_of=AGGA_UUID&member_of=!AGGB_UUID
would translate logically to "Candidate resource providers must be in AGGA
and *not* in AGGB."
We do NOT support ``!`` on the values within ``in:``, but we support
``!in:``. Both of the following two example queries return candidate
resource providers that are NOT in AGGA, AGGB, or AGGC::
member_of=!in:AGGA_UUID,AGGB_UUID,AGGC_UUID
member_of=!AGGA_UUID&member_of=!AGGB_UUID&member_of=!AGGC_UUID
We do not check if the same aggregate uuid is in both positive and negative
expression to return 400 BadRequest. We still return 200 for such cases.
For example::
member_of=AGGA_UUID&member_of=!AGGA_UUID
would return empty ``allocation_requests`` and ``provider_summaries``,
while::
member_of=in:AGGA_UUID,AGGB_UUID&member_of=!AGGA_UUID
would return resource providers that are NOT in AGGA but in AGGB.
min_version: 1.21
allocation_candidates_member_of_granular:
type: string
in: query
required: false
description: >
A string representing an aggregate uuid; or the prefix ``in:`` followed by
a comma-separated list of strings representing aggregate uuids. The
returned resource providers must directly be associated with at least one
of the aggregates identified by uuid.
**Starting from microversion 1.32** specifying forbidden aggregates is
supported. Forbidden aggregates are expressed with a ``!`` prefix; or the
prefix ``!in:`` followed by a comma-separated list of strings representing
aggregate uuids. The returned resource providers must not directly be
associated with any of the aggregates identified by uuid.
The parameter key is ``member_ofN``, where ``N`` represents a suffix
corresponding with a ``resourcesN`` parameter. The value format is the
same as for the (not granular) ``member_of`` parameter; but all of the
resources and traits specified in a granular grouping will always be
satisfied by the same resource provider.
**In microversions 1.25 - 1.32** the suffix is a number.
**Starting from microversion 1.33** the suffix is a string that may be 1-64
characters long and consist of numbers, ``a-z``, ``A-Z``, ``-``, and ``_``.
Separate groupings - with or without a suffix - may or may not be satisfied
by the same provider, depending on the value of the ``group_policy``
parameter.
It is an error to specify a ``member_ofN`` parameter without a
corresponding ``resourcesN`` parameter with the same suffix.
min_version: 1.25
allocation_candidates_root_required:
type: string
in: query
required: false
min_version: 1.35
description: |
A comma-separated list of trait requirements that the root provider of the
(non-sharing) tree must satisfy::
root_required=COMPUTE_SUPPORTS_MULTI_ATTACH,!CUSTOM_WINDOWS_LICENSED
Allocation requests in the response will be limited to those whose
(non-sharing) tree's root provider satisfies the specified trait
requirements. Traits which are forbidden (must **not** be present on the
root provider) are expressed by prefixing the trait with a ``!``.
allocation_candidates_same_subtree:
type: string
in: query
required: false
min_version: 1.36
description: |
A comma-separated list of request group suffix strings ($S). Each must
exactly match a suffix on a granular group somewhere else in the request.
Importantly, the identified request groups need not have a resources[$S].
If this is provided, at least one of the resource providers satisfying a
specified request group must be an ancestor of the rest.
The ``same_subtree`` query parameter can be repeated and each repeat group
is treated independently.
consumer_type_req:
type: string
in: query
required: false
min_version: 1.38
description: |
A string that consists of numbers, ``A-Z``, and ``_`` describing the
consumer type by which to filter usage results. For example, to retrieve
only usage information for 'INSTANCE' type consumers a parameter of
``consumer_type=INSTANCE`` should be provided.
The ``all`` query parameter may be specified to group all results under
one key, ``all``. The ``unknown`` query parameter may be specified to
group all results under one key, ``unknown``.
project_id: &project_id
type: string
in: query
required: true
description: >
The uuid of a project.
required_traits_granular:
type: string
in: query
required: false
description: |
A comma-separated list of traits that a provider must have, or (if prefixed
with a ``!``) **not** have::
required42=HW_CPU_X86_AVX,HW_CPU_X86_SSE,!HW_CPU_X86_AVX2
The parameter key is ``requiredN``, where ``N`` represents a suffix
corresponding with a ``resourcesN`` parameter.
The value format is the same as for the (not granular) ``required``
parameter; but all of the resources and traits specified in a suffixed
grouping will always be satisfied by the same resource provider. Separate
groupings - with or without a suffix - may or may not be satisfied by the
same provider, depending on the value of the ``group_policy`` parameter.
**In microversions 1.25 - 1.32** the suffix is a number.
**Starting from microversion 1.33** the suffix is a string that may be 1-64
characters long and consist of numbers, ``a-z``, ``A-Z``, ``-``, and ``_``.
It is an error to specify a ``requiredN`` parameter without a corresponding
``resourcesN`` parameter with the same suffix.
**Starting from microversion 1.39** the granular ``requiredN`` query
parameter gained support for the ``in:`` syntax as well as the repetition
of the parameter. So::
requiredN=in:T3,T4&requiredN=T1,!T2
is supported and it means T1 and not T2 and (T3 or T4).
min_version: 1.25
required_traits_unnumbered:
type: string
in: query
required: false
min_version: 1.17
description: |
A comma-separated list of traits that a provider must have::
required=HW_CPU_X86_AVX,HW_CPU_X86_SSE
Allocation requests in the response will be for resource providers that
have capacity for all requested resources and the set of those resource
providers will *collectively* contain all of the required traits. These
traits may be satisfied by any provider in the same non-sharing tree or
associated via aggregate as far as that provider also contributes resource
to the request. **Starting from microversion 1.22** traits which
are forbidden from any resource provider contributing resources to the
request may be expressed by prefixing a trait with a ``!``.
**Starting from microversion 1.39** the ``required`` query parameter can be
repeated. The trait lists from the repeated parameters are ANDed together.
So::
required=T1,!T2&required=T3
means T1 and not T2 and T3.
Also **starting from microversion 1.39** the ``required`` parameter
supports the syntax::
required=in:T1,T2,T3
which means T1 or T2 or T3.
Mixing forbidden traits into an ``in:`` prefixed value is not supported and
rejected. But mixing a normal trait list and an ``in:`` prefixed trait list
in two query params within the same request is supported. So::
required=in:T3,T4&required=T1,!T2
is supported and it means T1 and not T2 and (T3 or T4).
resource_provider_member_of:
type: string
in: query
required: false
description: >
A string representing an aggregate uuid; or the prefix ``in:`` followed by
a comma-separated list of strings representing aggregate uuids. The
returned resource providers must directly be associated with at least one
of the aggregates identified by uuid::
member_of=5e08ea53-c4c6-448e-9334-ac4953de3cfa
member_of=in:42896e0d-205d-4fe3-bd1e-100924931787,5e08ea53-c4c6-448e-9334-ac4953de3cfa
**Starting from microversion 1.24** specifying multiple ``member_of`` query
string parameters is possible. Multiple ``member_of`` parameters will
result in filtering providers that are associated with aggregates listed in
all of the ``member_of`` query string values. For example, to get the
providers that are associated with aggregate A as well as associated with
any of aggregates B or C, the user could issue the following query::
member_of=AGGA_UUID&member_of=in:AGGB_UUID,AGGC_UUID
**Starting from microversion 1.32** specifying forbidden aggregates is
supported in the ``member_of`` query string parameter. Forbidden aggregates
are prefixed with a ``!``. This negative expression can also be used in
multiple ``member_of`` parameters::
member_of=AGGA_UUID&member_of=!AGGB_UUID
would translate logically to "Candidate resource providers must be in AGGA
and *not* in AGGB."
We do NOT support ``!`` on the values within ``in:``, but we support
``!in:``. Both of the following two example queries return candidate
resource providers that are NOT in AGGA, AGGB, or AGGC::
member_of=!in:AGGA_UUID,AGGB_UUID,AGGC_UUID
member_of=!AGGA_UUID&member_of=!AGGB_UUID&member_of=!AGGC_UUID
We do not check if the same aggregate uuid is in both positive and negative
expression to return 400 BadRequest. We still return 200 for such cases.
For example::
member_of=AGGA_UUID&member_of=!AGGA_UUID
would return an empty list for ``resource_providers``, while::
member_of=in:AGGA_UUID,AGGB_UUID&member_of=!AGGA_UUID
would return resource providers that are NOT in AGGA but in AGGB.
min_version: 1.3
resource_provider_name_query:
type: string
in: query
required: false
description: >
The name of a resource provider to filter the list.
resource_provider_required_query:
type: string
in: query
required: false
description: |
A comma-delimited list of string trait names. Results will be filtered to
include only resource providers having all the specified traits. **Starting
from microversion 1.22** traits which are forbidden from any resource
provider may be expressed by prefixing a trait with a ``!``.
**Starting from microversion 1.39** the ``required`` query parameter can be
repeated. The trait lists from the repeated parameters are ANDed together.
So::
required=T1,!T2&required=T3
means T1 and not T2 and T3.
Also **starting from microversion 1.39** the ``required`` parameter
supports the syntax::
required=in:T1,T2,T3
which means T1 or T2 or T3.
Mixing forbidden traits into an ``in:`` prefixed value is not supported and
is rejected. But mixing a normal trait list and an ``in:`` trait list in two query
params within the same request is supported. So::
required=in:T3,T4&required=T1,!T2
is supported and it means T1 and not T2 and (T3 or T4).
min_version: 1.18
resource_provider_tree_query:
type: string
in: query
required: false
description: >
A UUID of a resource provider. The returned resource providers will be in
the same "provider tree" as the specified provider.
min_version: 1.14
resource_provider_uuid_query:
<<: *resource_provider_uuid_path
in: query
required: false
resources_query_1_4:
type: string
in: query
required: false
description: |
A comma-separated list of strings indicating an amount of
resource of a specified class that a provider must have the
capacity and availability to serve::
resources=VCPU:4,DISK_GB:64,MEMORY_MB:2048
Note that the amount must be an integer greater than 0.
min_version: 1.4
resources_query_ac:
type: string
in: query
required: false
description: |
A comma-separated list of strings indicating an amount of
resource of a specified class that providers in each allocation request
must *collectively* have the capacity and availability to serve::
resources=VCPU:4,DISK_GB:64,MEMORY_MB:2048
These resources may be satisfied by any provider in the same non-sharing
tree or associated via aggregate.
resources_query_granular:
type: string
in: query
required: false
description: |
A comma-separated list of strings indicating an amount of
resource of a specified class that a provider must have the
capacity to serve::
resources42=VCPU:4,DISK_GB:64,MEMORY_MB:2048
The parameter key is ``resourcesN``, where ``N`` represents a unique
suffix. The value format is the same as for the (not granular)
``resources`` parameter, but the resources specified in a ``resourcesN``
parameter will always be satisfied by a single provider.
**In microversions 1.25 - 1.32** the suffix is a number.
**Starting from microversion 1.33** the suffix is a string that may be 1-64
characters long and consist of numbers, ``a-z``, ``A-Z``, ``-``, and ``_``.
Separate groupings - with or without a suffix - may or may not be satisfied
by the same provider depending on the value of the ``group_policy``
parameter.
min_version: 1.25
trait_associated:
type: string
in: query
required: false
description: >
If this parameter has a true value, the returned traits will be
those that are associated with at least one resource provider.
Available values for the parameter are true and false.
trait_name_query:
type: string
in: query
required: false
description: |
A string to filter traits. The following options are available:
`startswith` operator filters the traits whose name begins with a
specific prefix, e.g. name=startswith:CUSTOM,
`in` operator filters the traits whose name is in the specified list, e.g.
name=in:HW_CPU_X86_AVX,HW_CPU_X86_SSE,HW_CPU_X86_INVALID_FEATURE.
user_id: &user_id
type: string
in: query
required: false
description: >
The uuid of a user.
# variables in body
aggregates:
type: array
in: body
required: true
description: >
A list of aggregate uuids. Previously nonexistent aggregates are
created automatically.
allocation_ratio: &allocation_ratio
type: float
in: body
required: true
description: |
It is used in determining whether consumption of the resource of
the provider can exceed physical constraints.
For example, for a vCPU resource with::
allocation_ratio = 16.0
total = 8
Overall capacity is equal to 128 vCPUs.
allocation_ratio_opt:
<<: *allocation_ratio
required: false
allocation_requests:
type: array
in: body
required: true
description: >
A list of objects that contain a
serialized HTTP body that a client may subsequently use in a call
to PUT /allocations/{consumer_uuid} to claim resources against a
related set of resource providers.
allocations_array:
type: array
in: body
required: true
description: >
A list of dictionaries.
allocations_by_resource_provider:
type: object
in: body
required: true
description: >
A dictionary of allocations keyed by resource provider uuid.
allocations_dict: &allocations_dict
type: object
in: body
required: true
description: >
A dictionary of resource allocations keyed by resource provider uuid.
allocations_dict_empty:
<<: *allocations_dict
description: >
A dictionary of resource allocations keyed by resource provider uuid.
If this is an empty object, allocations for this consumer will be
removed.
min_version: null
capacity:
type: integer
in: body
required: true
description: >
The amount of the resource that the provider can accommodate.
consumer_count:
type: integer
in: body
required: true
min_version: 1.38
description: >
The number of consumers of a particular ``consumer_type``.
consumer_generation: &consumer_generation
type: integer
in: body
required: true
description: >
The generation of the consumer. Should be set to ``null`` when indicating
that the caller expects the consumer does not yet exist.
consumer_generation_get:
<<: *consumer_generation
description: >
The generation of the consumer. Will be absent when listing allocations for
a consumer uuid that has no allocations.
min_version: 1.28
consumer_generation_min:
<<: *consumer_generation
min_version: 1.28
consumer_type:
type: string
in: body
required: true
min_version: 1.38
description: >
A string that consists of numbers, ``A-Z``, and ``_`` describing what kind
of consumer is creating, or has created, allocations using a quantity of
inventory. The string is determined by the client when writing allocations
and it is up to the client to ensure correct choices amongst collaborating
services. For example, the compute service may choose to type some
consumers 'INSTANCE' and others 'MIGRATION'.
consumer_uuid_body:
<<: *consumer_uuid
in: body
inventories:
type: object
in: body
required: true
description: >
A dictionary of inventories keyed by resource classes.
mappings: &mappings
type: object
in: body
required: true
description: >
A dictionary associating request group suffixes with a list of uuids
identifying the resource providers that satisfied each group. The empty
string and ``[a-zA-Z0-9_-]+`` are valid suffixes. This field may be sent
when writing allocations back to the server but will be ignored; this
preserves symmetry between read and write representations.
min_version: 1.34
mappings_in_allocations:
<<: *mappings
required: false
max_unit: &max_unit
type: integer
in: body
required: true
description: >
A maximum amount any single allocation against an inventory can have.
max_unit_opt:
<<: *max_unit
required: false
min_unit: &min_unit
type: integer
in: body
required: true
description: >
A minimum amount any single allocation against an inventory can have.
min_unit_opt:
<<: *min_unit
required: false
project_id_body: &project_id_body
<<: *project_id
in: body
project_id_body_1_12:
<<: *project_id_body
description: >
The uuid of a project. Will be absent when listing allocations for
a consumer uuid that has no allocations.
min_version: 1.12
project_id_body_1_8:
<<: *project_id_body
min_version: 1.8
provider_summaries:
type: object
in: body
required: true
description: >
A dictionary keyed by resource provider UUID included in the
``allocation_requests``, of dictionaries of inventory/capacity information.
provider_summaries_1_12:
type: object
in: body
required: true
description: >
A dictionary keyed by resource provider UUID included in the
``allocation_requests``, of dictionaries of inventory/capacity information.
The list of traits the resource provider has associated with it is included
in version 1.17 and above.
Starting from microversion 1.29, the provider summaries include
all resource providers in the same resource provider tree that has one
or more resource providers included in the ``allocation_requests``.
reserved: &reserved
type: integer
in: body
required: true
description: >
The amount of the resource a provider has reserved for its own use.
reserved_opt:
<<: *reserved
required: false
description: >
The amount of the resource a provider has reserved for its own use.
Up to microversion 1.25, this value has to be less than the value of
``total``. Starting from microversion 1.26, this value has to be less
than or equal to the value of ``total``.
reshaper_allocations:
type: object
in: body
required: true
description: >
A dictionary of multiple allocations, keyed by consumer uuid. Each
collection of allocations describes the full set of allocations for
each consumer. Each consumer allocations dict is itself a dictionary
of resource allocations keyed by resource provider uuid. An empty
dictionary indicates no change in existing allocations, whereas an empty
``allocations`` dictionary **within** a consumer dictionary indicates that
all allocations for that consumer should be deleted.
reshaper_inventories:
type: object
in: body
required: true
description: >
A dictionary of multiple inventories, keyed by resource provider uuid. Each
inventory describes the desired full inventory for each resource provider.
An empty dictionary causes the inventory for that provider to be deleted.
resource_class:
<<: *resource_class_path
in: body
resource_class_custom:
<<: *resource_class_path_custom
in: body
resource_class_links:
type: array
in: body
required: true
description: >
A list of links associated with one resource class.
resource_classes:
type: array
in: body
required: true
description: >
A list of ``resource_class`` objects.
resource_provider_allocations:
type: object
in: body
required: true
description: >
A dictionary of allocation records keyed by consumer uuid.
resource_provider_generation: &resource_provider_generation
type: integer
in: body
required: true
description: >
A consistent view marker that assists with the management of
concurrent resource provider updates.
resource_provider_generation_optional:
<<: *resource_provider_generation
required: false
description: >
A consistent view marker that assists with the management of
concurrent resource provider updates. The value is ignored;
it is present to preserve symmetry between read and
write representations.
resource_provider_generation_v1_19:
<<: *resource_provider_generation
min_version: 1.19
resource_provider_links: &resource_provider_links
type: array
in: body
required: true
description: |
A list of links associated with one resource provider.
.. note::
Aggregates relationship link is available starting from version 1.1.
Traits relationship link is available starting from version 1.6.
Allocations relationship link is available starting from version 1.11.
resource_provider_links_v1_20:
<<: *resource_provider_links
description: |
A list of links associated with the resource provider.
resource_provider_name:
type: string
in: body
required: true
description: >
The name of one resource provider.
resource_provider_object:
type: object
in: body
required: true
description: >
A dictionary which contains the UUID of the resource provider.
resource_provider_parent_provider_uuid_request:
type: string
in: body
required: false
description: |
The UUID of the immediate parent of the resource provider.
* Before version ``1.37``, once set, the parent of a resource provider
cannot be changed.
* Since version ``1.37``, it can be set to any existing provider UUID
except providers that would cause a loop. Also, it can be set to null
to transform the provider to a new root provider. This operation needs
to be used carefully. Moving providers can mean that the original rules
used to create the existing resource allocations may be invalidated
by that move.
min_version: 1.14
resource_provider_parent_provider_uuid_required_no_min:
type: string
in: body
required: true
description: >
The UUID of the immediate parent of the resource provider.
resource_provider_parent_provider_uuid_response_1_14:
type: string
in: body
required: true
description: >
The UUID of the immediate parent of the resource provider.
min_version: 1.14
resource_provider_parent_provider_uuid_response_1_29:
type: string
in: body
required: true
description: >
The UUID of the immediate parent of the resource provider.
min_version: 1.29
resource_provider_root_provider_uuid_1_29:
type: string
in: body
required: true
description: >
UUID of the top-most provider in this provider tree.
min_version: 1.29
resource_provider_root_provider_uuid_no_min: &resource_provider_root_provider_uuid_no_min
type: string
in: body
required: true
description: >
UUID of the top-most provider in this provider tree.
resource_provider_root_provider_uuid_required:
<<: *resource_provider_root_provider_uuid_no_min
description: >
Read-only UUID of the top-most provider in this provider tree.
min_version: 1.14
resource_provider_usages:
type: object
in: body
required: true
description: >
The usage summary of the resource provider. This is a dictionary that
describes how much each class of resource is being consumed on this
resource provider. For example, ``"VCPU": 1`` means 1 VCPU is used.
resource_provider_uuid:
<<: *resource_provider_uuid_path
in: body
resource_provider_uuid_opt:
<<: *resource_provider_uuid_path
in: body
required: false
resource_providers:
type: array
in: body
required: true
description: >
A list of ``resource_provider`` objects.
resources:
type: object
in: body
required: true
description: >
A dictionary of resource records keyed by resource class name.
resources_single:
type: integer
in: body
required: true
description: >
An amount of resource class consumed in a usage report.
step_size: &step_size
type: integer
in: body
required: true
description: >
A representation of the divisible amount of the resource
that may be requested. For example, step_size = 5 means
that only values divisible by 5 (5, 10, 15, etc.) can be requested.
step_size_opt:
<<: *step_size
required: false
total:
type: integer
in: body
required: true
description: >
The actual amount of the resource that the provider can accommodate.
traits: &traits
type: array
in: body
required: true
description: >
A list of traits.
traits_1_17:
<<: *traits
min_version: 1.17
used:
type: integer
in: body
required: true
description: >
The amount of the resource that has been already allocated.
user_id_body: &user_id_body
<<: *user_id
in: body
required: true
user_id_body_1_12:
<<: *user_id_body
description: >
The uuid of a user. Will be absent when listing allocations for
a consumer uuid that has no allocations.
min_version: 1.12
user_id_body_1_8:
<<: *user_id_body
min_version: 1.8
version_id:
type: string
in: body
required: true
description: >
A common name for the version being described. Informative only.
version_links:
type: array
in: body
required: true
description: >
A list of links related to and describing this version.
version_max:
type: string
in: body
required: true
description: >
The maximum microversion that is supported.
version_min:
type: string
in: body
required: true
description: >
The minimum microversion that is supported.
version_status:
type: string
in: body
required: true
description: >
The status of the version being described. With placement this is
"CURRENT".
versions:
type: array
in: body
required: true
description: >
A list of version objects that describe the API versions available.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/request-ids.inc 0000664 0000000 0000000 00000004646 15132464062 0024464 0 ustar 00root root 0000000 0000000 ===========
Request IDs
===========
All logs on the system, by default, include the global request ID and
the local request ID when available.
The local request ID is a unique ID locally generated by each service,
and thus different for each service (Nova, Cinder, Glance, Neutron, etc.)
involved in that operation. The format is ``req-`` + UUID (UUID4).
The global request ID is a user-specified request ID which is utilized as
a common identifier by all services. This request ID is the same among all
services involved in that operation. The format is ``req-`` + UUID (UUID4).
This allows an administrator to track
the API request processing as it transitions between all the different
nova services or between nova and other component services called by Nova
during that request.
Even if a global request ID is specified in the request, users always receive
a local request ID in the response.
For more details about request IDs, please reference: `Faults
<https://docs.openstack.org/api-guide/compute/faults.html>`_
(It is *not* for Placement APIs, but there are some common points.)
**Request**
.. NOTE(takashin): The 'rest_parameters' directive needs the 'rest_method'
directive before itself. But this file does not contain
the 'rest_method' directive.
So the 'rest_parameters' directive is not used.
.. list-table::
:widths: 20 10 10 60
:header-rows: 1
* - Name
- In
- Type
- Description
* - X-Openstack-Request-Id (Optional)
- header
- string
- The global request ID, which is a unique common ID for tracking each
request in OpenStack components. The format of the global request ID
must be ``req-`` + UUID (UUID4). If it does not conform to this format,
it is ignored. It is associated with the request and appears in the log
lines for that request. By default, the middleware configuration ensures
that the global request ID appears in the log files.
**Response**
.. list-table::
:widths: 20 10 10 60
:header-rows: 1
* - Name
- In
- Type
- Description
* - X-Openstack-Request-Id
- header
- string
- The local request ID, which is a unique ID generated automatically for
tracking each request to placement. It is associated with the request and
appears in the log lines for that request. By default, the middleware
configuration ensures that the local request ID appears in the log files.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/reshaper.inc 0000664 0000000 0000000 00000003172 15132464062 0024021 0 ustar 00root root 0000000 0000000
.. _reshaper:
========
Reshaper
========
.. note:: Reshaper requests are available starting from version 1.30.
Reshaper
========
Atomically migrate resource provider inventories and associated allocations.
This is used when some of the inventory needs to move from one resource
provider to another, such as when a class of inventory moves from a parent
provider to a new child provider.
.. note:: This is a special operation that should only be used in rare cases
of resource provider topology changing when inventory is in use.
Only use this if you are really sure of what you are doing.
.. rest_method:: POST /reshaper
Normal Response Codes: 204
Error Response Codes: badRequest(400), conflict(409)
Request
-------
.. rest_parameters:: parameters.yaml
- inventories: reshaper_inventories
- inventories.{resource_provider_uuid}.resource_provider_generation: resource_provider_generation
- inventories.{resource_provider_uuid}.inventories: inventories
- allocations: reshaper_allocations
- allocations.{consumer_uuid}.allocations: allocations_dict_empty
- allocations.{consumer_uuid}.allocations.{resource_provider_uuid}.resources: resources
- allocations.{consumer_uuid}.project_id: project_id_body
- allocations.{consumer_uuid}.user_id: user_id_body
- allocations.{consumer_uuid}.mappings: mappings
- allocations.{consumer_uuid}.consumer_generation: consumer_generation
- allocations.{consumer_uuid}.consumer_type: consumer_type
Request Example
---------------
.. literalinclude:: ./samples/reshaper/post-reshaper-1.38.json
:language: javascript
No body content is returned on a successful POST.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_class.inc 0000664 0000000 0000000 00000006202 15132464062 0025221 0 ustar 00root root 0000000 0000000 ==============
Resource Class
==============
See `resource classes`_ for a description.
This group of API calls works with a single resource class
identified by `name`. One resource class can be listed, updated and
deleted.
.. note:: Resource class API calls are available starting from version 1.2.
Show resource class
===================
.. rest_method:: GET /resource_classes/{name}
Return a representation of the resource class identified by `{name}`.
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- name: resource_class_path
Response
--------
.. rest_parameters:: parameters.yaml
- name: resource_class
- links: resource_class_links
Response Example
----------------
.. literalinclude:: ./samples/resource_classes/get-resource_class.json
:language: javascript
Update resource class
=====================
.. rest_method:: PUT /resource_classes/{name}
Create or validate the existence of a single resource class identified by `{name}`.
.. note:: Method is available starting from version 1.7.
Normal Response Codes: 201, 204
A `201 Created` response code will be returned if the new resource class
is successfully created.
A `204 No Content` response code will be returned if the resource class
already exists.
Error response codes: badRequest(400)
Request
-------
.. rest_parameters:: parameters.yaml
- name: resource_class_path_custom
Response
--------
.. rest_parameters:: parameters.yaml
- Location: location
No body content is returned on a successful PUT.
Update resource class (microversions 1.2 - 1.6)
===============================================
.. warning:: Changing resource class names using the <1.7 microversion is strongly discouraged.
.. rest_method:: PUT /resource_classes/{name}
Update the name of the resource class identified by `{name}`.
Normal Response Codes: 200
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
A `409 Conflict` response code will be returned if another resource
class exists with the provided name.
Request
-------
.. rest_parameters:: parameters.yaml
- name: resource_class_path
- name: resource_class_custom
Request example
---------------
.. literalinclude:: ./samples/resource_classes/update-resource_class-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- name: resource_class
- links: resource_class_links
Response Example
----------------
.. literalinclude:: ./samples/resource_classes/update-resource_class.json
:language: javascript
Delete resource class
=====================
.. rest_method:: DELETE /resource_classes/{name}
Delete the resource class identified by `{name}`.
Normal Response Codes: 204
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
A `400 BadRequest` response code will be returned if trying
to delete a standard resource class.
A `409 Conflict` response code will be returned if there exist
inventories for the resource class.
Request
-------
.. rest_parameters:: parameters.yaml
- name: resource_class_path
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_classes.inc 0000664 0000000 0000000 00000003071 15132464062 0025552 0 ustar 00root root 0000000 0000000 ================
Resource Classes
================
Resource classes are entities that indicate standard or
deployer-specific resources that can be provided by a resource
provider.
.. note:: Resource class API calls are available starting from version 1.2.
List resource classes
=====================
.. rest_method:: GET /resource_classes
Return a list of all resource classes.
Normal Response Codes: 200
Response
--------
.. rest_parameters:: parameters.yaml
- resource_classes: resource_classes
- links: resource_class_links
- name: resource_class
Response Example
----------------
.. literalinclude:: ./samples/resource_classes/get-resource_classes.json
:language: javascript
Create resource class
=====================
.. rest_method:: POST /resource_classes
Create a new resource class. The new class must be a *custom* resource class,
prefixed with `CUSTOM_` and distinct from the standard resource classes.
Normal Response Codes: 201
Error response codes: badRequest(400), conflict(409)
A `400 BadRequest` response code will be returned if the resource class
does not have prefix `CUSTOM_`.
A `409 Conflict` response code will
be returned if another resource class exists with the provided name.
Request
-------
.. rest_parameters:: parameters.yaml
- name: resource_class_custom
Request example
---------------
.. literalinclude:: ./samples/resource_classes/create-resource_classes-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- Location: location
No body content is returned on a successful POST.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_provider.inc 0000664 0000000 0000000 00000005772 15132464062 0025761 0 ustar 00root root 0000000 0000000 =================
Resource Provider
=================
See `Resource providers`_ for a description.
This group of API calls works with a single resource provider
identified by `uuid`. One resource provider can be listed, updated and
deleted.
Show resource provider
======================
.. rest_method:: GET /resource_providers/{uuid}
Return a representation of the resource provider identified by `{uuid}`.
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
.. rest_parameters:: parameters.yaml
- generation: resource_provider_generation
- uuid: resource_provider_uuid
- links: resource_provider_links
- name: resource_provider_name
- parent_provider_uuid: resource_provider_parent_provider_uuid_response_1_14
- root_provider_uuid: resource_provider_root_provider_uuid_required
Response Example
----------------
.. literalinclude:: ./samples/resource_providers/get-resource_provider.json
:language: javascript
Update resource provider
========================
.. rest_method:: PUT /resource_providers/{uuid}
Update the name of the resource provider identified by `{uuid}`.
Normal Response Codes: 200
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
A `409 Conflict` response code will be returned if another resource
provider exists with the provided name.
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- name: resource_provider_name
- parent_provider_uuid: resource_provider_parent_provider_uuid_request
Request example
---------------
.. literalinclude:: ./samples/resource_providers/update-resource_provider-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- generation: resource_provider_generation
- uuid: resource_provider_uuid
- links: resource_provider_links
- name: resource_provider_name
- parent_provider_uuid: resource_provider_parent_provider_uuid_response_1_14
- root_provider_uuid: resource_provider_root_provider_uuid_required
Response Example
----------------
.. literalinclude:: ./samples/resource_providers/update-resource_provider.json
:language: javascript
Delete resource provider
========================
.. rest_method:: DELETE /resource_providers/{uuid}
Delete the resource provider identified by `{uuid}`.
This will also disassociate aggregates and delete inventories.
Normal Response Codes: 204
Error response codes: itemNotFound(404), conflict(409)
A `409 Conflict` response code will be returned if there exist
allocations records for any of the inventories that would be deleted
as a result of removing the resource provider.
This error code will be also returned if there are existing child resource
providers under the parent resource provider being deleted.
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_provider_allocations.inc 0000664 0000000 0000000 00000001704 15132464062 0030340 0 ustar 00root root 0000000 0000000 =============================
Resource provider allocations
=============================
See `Allocations`_ for a description.
List resource provider allocations
==================================
Return a representation of all allocations made against this resource
provider, keyed by consumer uuid. Each allocation includes one or more
classes of resource and the amount consumed.
.. rest_method:: GET /resource_providers/{uuid}/allocations
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
.. rest_parameters:: parameters.yaml
- allocations: resource_provider_allocations
- resources: resources
- resource_provider_generation: resource_provider_generation
Response Example
----------------
.. literalinclude:: ./samples/resource_provider_allocations/get-resource_provider_allocations.json
:language: javascript
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_provider_traits.inc 0000664 0000000 0000000 00000005706 15132464062 0027344 0 ustar 00root root 0000000 0000000 ========================
Resource provider traits
========================
See `Traits`_ for a description.
This group of API requests queries/edits the association between
traits and resource providers.
.. note:: Traits API requests are available starting from version 1.6.
List resource provider traits
=============================
Return a list of traits for the resource provider identified by `{uuid}`.
.. rest_method:: GET /resource_providers/{uuid}/traits
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
.. rest_parameters:: parameters.yaml
- traits: traits
- resource_provider_generation: resource_provider_generation
Response Example
----------------
.. literalinclude:: ./samples/resource_provider_traits/get-resource_provider-traits.json
:language: javascript
Update resource provider traits
===============================
Associate traits with the resource provider identified by `{uuid}`.
All the associated traits will be replaced by the traits specified in
the request body.
.. rest_method:: PUT /resource_providers/{uuid}/traits
Normal Response Codes: 200
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
* `400 Bad Request` if any of the specified traits are not valid. The valid
traits can be queried by `GET /traits`.
* `409 Conflict` if the `resource_provider_generation` doesn't match with the
server side.
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
- traits: traits
- resource_provider_generation: resource_provider_generation
Request example
---------------
.. literalinclude:: ./samples/resource_provider_traits/update-resource_provider-traits-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- traits: traits
- resource_provider_generation: resource_provider_generation
Response Example
----------------
.. literalinclude:: ./samples/resource_provider_traits/update-resource_provider-traits.json
:language: javascript
Delete resource provider traits
===============================
Dissociate all the traits from the resource provider identified by `{uuid}`.
.. rest_method:: DELETE /resource_providers/{uuid}/traits
Normal Response Codes: 204
Error response codes: itemNotFound(404), conflict(409)
* `409 Conflict` if the provider's traits are updated by another
thread while attempting the operation.
.. note:: Since this request does not accept the resource provider generation,
it is not safe to use when multiple threads are managing traits for
a single provider. In such situations, use the
``PUT /resource_providers/{uuid}/traits`` API with an empty
``traits`` list.
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_provider_usages.inc 0000664 0000000 0000000 00000002075 15132464062 0027321 0 ustar 00root root 0000000 0000000 ========================
Resource provider usages
========================
Show the consumption of resources for a resource provider
in an aggregated form, i.e. without information for a particular consumer.
See `Resource provider allocations`_.
List resource provider usages
=============================
Return a report of usage information for resources associated with
the resource provider identified by `{uuid}`. The value is a dictionary
of resource classes paired with the sum of the allocations of that
resource class for this resource provider.
.. rest_method:: GET /resource_providers/{uuid}/usages
Normal Response Codes: 200
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- uuid: resource_provider_uuid_path
Response
--------
.. rest_parameters:: parameters.yaml
- resource_provider_generation: resource_provider_generation
- usages: resource_provider_usages
Response Example
----------------
.. literalinclude:: ./samples/resource_provider_usages/get-resource_provider_usages.json
:language: javascript
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/resource_providers.inc 0000664 0000000 0000000 00000006120 15132464062 0026130 0 ustar 00root root 0000000 0000000 ==================
Resource Providers
==================
Resource providers are entities which provide consumable inventory of one or
more classes of resource (such as disk or memory). They can be listed (with
filters), created, updated and deleted.
List resource providers
=======================
.. rest_method:: GET /resource_providers
List an optionally filtered collection of resource providers.
Normal Response Codes: 200
Error response codes: badRequest(400)
A `400 BadRequest` response code will be returned
if a resource class specified in ``resources`` request parameter
does not exist.
Request
-------
Several query parameters are available to filter the returned list of
resource providers. If multiple different parameters are provided, the results
of all filters are merged with a boolean `AND`.
.. rest_parameters:: parameters.yaml
- name: resource_provider_name_query
- uuid: resource_provider_uuid_query
- member_of: resource_provider_member_of
- resources: resources_query_1_4
- in_tree: resource_provider_tree_query
- required: resource_provider_required_query
Response
--------
.. rest_parameters:: parameters.yaml
- resource_providers: resource_providers
- generation: resource_provider_generation
- uuid: resource_provider_uuid
- links: resource_provider_links
- name: resource_provider_name
- parent_provider_uuid: resource_provider_parent_provider_uuid_response_1_14
- root_provider_uuid: resource_provider_root_provider_uuid_required
Response Example
----------------
.. literalinclude:: ./samples/resource_providers/get-resource_providers.json
:language: javascript
Create resource provider
========================
.. rest_method:: POST /resource_providers
Create a new resource provider.
Normal Response Codes: 201 (microversions 1.0 - 1.19), 200 (microversions
1.20 - )
Error response codes: conflict(409)
A `409 Conflict` response code will
be returned if another resource provider exists with the provided name
or uuid.
Request
-------
.. rest_parameters:: parameters.yaml
- name: resource_provider_name
- uuid: resource_provider_uuid_opt
- parent_provider_uuid: resource_provider_parent_provider_uuid_request
Request example
---------------
.. literalinclude:: ./samples/resource_providers/create-resource_providers-request.json
:language: javascript
Response (microversions 1.0 - 1.19)
-----------------------------------
.. rest_parameters:: parameters.yaml
- Location: location
No body content is returned on a successful POST.
Response (microversions 1.20 - )
--------------------------------
.. rest_parameters:: parameters.yaml
- Location: location
- generation: resource_provider_generation
- uuid: resource_provider_uuid
- links: resource_provider_links_v1_20
- name: resource_provider_name
- parent_provider_uuid: resource_provider_parent_provider_uuid_required_no_min
- root_provider_uuid: resource_provider_root_provider_uuid_no_min
Response Example (microversions 1.20 - )
----------------------------------------
.. literalinclude:: ./samples/resource_providers/create-resource_provider.json
:language: javascript
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/root.inc 0000664 0000000 0000000 00000002612 15132464062 0023171 0 ustar 00root root 0000000 0000000 ============
API Versions
============
In order to bring new features to users over time, the Placement API
supports microversioning. Microversions allow use of certain features on a
per-request basis via the ``OpenStack-API-Version`` header. For example, to
request microversion 1.10, specify the header::
OpenStack-API-Version: placement 1.10
For more details about Microversions, please reference:
`Microversion Specification
`_
.. note:: The maximum microversion supported by each release varies.
Please reference:
`REST API Version History
`__
for API microversion history details.
List Versions
=============
.. rest_method:: GET /
Fetch information about all known major versions of the placement API,
including information about the minimum and maximum microversions.
.. note:: At this time there is only one major version of the placement API:
version 1.0.
Normal Response Codes: 200
Response
--------
.. rest_parameters:: parameters.yaml
- versions: versions
- id: version_id
- min_version: version_min
- max_version: version_max
- status: version_status
- links: version_links
Response Example
----------------
.. literalinclude:: ./samples/root/get-root.json
:language: javascript
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/ 0000775 0000000 0000000 00000000000 15132464062 0023156 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates/ 0000775 0000000 0000000 00000000000 15132464062 0025267 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates/get-aggregates-1.19.json 0000664 0000000 0000000 00000000244 15132464062 0031436 0 ustar 00root root 0000000 0000000 {
"aggregates": [
"42896e0d-205d-4fe3-bd1e-100924931787",
"5e08ea53-c4c6-448e-9334-ac4953de3cfa"
],
"resource_provider_generation": 8
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates/get-aggregates.json 0000664 0000000 0000000 00000000175 15132464062 0031053 0 ustar 00root root 0000000 0000000 {
"aggregates": [
"42896e0d-205d-4fe3-bd1e-100924931787",
"5e08ea53-c4c6-448e-9334-ac4953de3cfa"
]
}
update-aggregates-1.19.json 0000664 0000000 0000000 00000000244 15132464062 0032062 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates {
"aggregates": [
"42896e0d-205d-4fe3-bd1e-100924931787",
"5e08ea53-c4c6-448e-9334-ac4953de3cfa"
],
"resource_provider_generation": 9
}
update-aggregates-request-1.19.json 0000664 0000000 0000000 00000000244 15132464062 0033550 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates {
"aggregates": [
"42896e0d-205d-4fe3-bd1e-100924931787",
"5e08ea53-c4c6-448e-9334-ac4953de3cfa"
],
"resource_provider_generation": 9
}
update-aggregates-request.json 0000664 0000000 0000000 00000000133 15132464062 0033157 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates [
"42896e0d-205d-4fe3-bd1e-100924931787",
"5e08ea53-c4c6-448e-9334-ac4953de3cfa"
]
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/aggregates/update-aggregates.json 0000664 0000000 0000000 00000000175 15132464062 0031556 0 ustar 00root root 0000000 0000000 {
"aggregates": [
"42896e0d-205d-4fe3-bd1e-100924931787",
"5e08ea53-c4c6-448e-9334-ac4953de3cfa"
]
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocation_candidates/ 0000775 0000000 0000000 00000000000 15132464062 0027462 5 ustar 00root root 0000000 0000000 get-allocation_candidates-1.12.json 0000664 0000000 0000000 00000003471 15132464062 0035743 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocation_candidates {
"allocation_requests": [
{
"allocations": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": 100
}
},
"35791f28-fb45-4717-9ea9-435b3ef7c3b3": {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024
}
}
}
},
{
"allocations": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": 100
}
},
"915ef8ed-9b91-4e38-8802-2e4224ad54cd": {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024
}
}
}
}
],
"provider_summaries": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": {
"used": 0,
"capacity": 1900
}
}
},
"915ef8ed-9b91-4e38-8802-2e4224ad54cd": {
"resources": {
"VCPU": {
"used": 0,
"capacity": 384
},
"MEMORY_MB": {
"used": 0,
"capacity": 196608
}
}
},
"35791f28-fb45-4717-9ea9-435b3ef7c3b3": {
"resources": {
"VCPU": {
"used": 0,
"capacity": 384
},
"MEMORY_MB": {
"used": 0,
"capacity": 196608
}
}
}
}
}
get-allocation_candidates-1.17.json 0000664 0000000 0000000 00000003671 15132464062 0035752 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocation_candidates {
"allocation_requests": [
{
"allocations": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": 100
}
},
"35791f28-fb45-4717-9ea9-435b3ef7c3b3": {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024
}
}
}
},
{
"allocations": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": 100
}
},
"915ef8ed-9b91-4e38-8802-2e4224ad54cd": {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024
}
}
}
}
],
"provider_summaries": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": {
"used": 0,
"capacity": 1900
}
},
"traits": ["HW_CPU_X86_SSE2", "HW_CPU_X86_AVX2"]
},
"915ef8ed-9b91-4e38-8802-2e4224ad54cd": {
"resources": {
"VCPU": {
"used": 0,
"capacity": 384
},
"MEMORY_MB": {
"used": 0,
"capacity": 196608
}
},
"traits": ["HW_NIC_SRIOV"]
},
"35791f28-fb45-4717-9ea9-435b3ef7c3b3": {
"resources": {
"VCPU": {
"used": 0,
"capacity": 384
},
"MEMORY_MB": {
"used": 0,
"capacity": 196608
}
},
"traits": []
}
}
}
get-allocation_candidates-1.29.json 0000664 0000000 0000000 00000005273 15132464062 0035755 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocation_candidates {
"allocation_requests": [
{
"allocations": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": 100
}
},
"35791f28-fb45-4717-9ea9-435b3ef7c3b3": {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024
}
}
}
},
{
"allocations": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": 100
}
},
"915ef8ed-9b91-4e38-8802-2e4224ad54cd": {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024
}
}
}
}
],
"provider_summaries": {
"a99bad54-a275-4c4f-a8a3-ac00d57e5c64": {
"resources": {
"DISK_GB": {
"used": 0,
"capacity": 1900
}
},
"traits": ["MISC_SHARES_VIA_AGGREGATE"],
"parent_provider_uuid": null,
"root_provider_uuid": "a99bad54-a275-4c4f-a8a3-ac00d57e5c64"
},
"35791f28-fb45-4717-9ea9-435b3ef7c3b3": {
"resources": {
"VCPU": {
"used": 0,
"capacity": 384
},
"MEMORY_MB": {
"used": 0,
"capacity": 196608
}
},
"traits": ["HW_CPU_X86_SSE2", "HW_CPU_X86_AVX2"],
"parent_provider_uuid": null,
"root_provider_uuid": "35791f28-fb45-4717-9ea9-435b3ef7c3b3"
},
"915ef8ed-9b91-4e38-8802-2e4224ad54cd": {
"resources": {
"VCPU": {
"used": 0,
"capacity": 384
},
"MEMORY_MB": {
"used": 0,
"capacity": 196608
}
},
"traits": ["HW_NIC_SRIOV"],
"parent_provider_uuid": null,
"root_provider_uuid": "915ef8ed-9b91-4e38-8802-2e4224ad54cd"
},
"f5120cad-67d9-4f20-9210-3092a79a28cf": {
"resources": {
"SRIOV_NET_VF": {
"used": 0,
"capacity": 8
}
},
"traits": [],
"parent_provider_uuid": "915ef8ed-9b91-4e38-8802-2e4224ad54cd",
"root_provider_uuid": "915ef8ed-9b91-4e38-8802-2e4224ad54cd"
}
}
}
get-allocation_candidates-1.34.json 0000664 0000000 0000000 00000004662 15132464062 0035752 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocation_candidates {
"allocation_requests": [
{
"allocations": {
"92e971c9-777a-48bf-a181-a2ca1105c015": {
"resources": {
"NET_BW_EGR_KILOBIT_PER_SEC": 10
}
},
"cefbdf54-05a8-4db4-ad2b-d6729e5a4de8": {
"resources": {
"NET_BW_EGR_KILOBIT_PER_SEC": 20
}
},
"9a9c6b0f-e8d1-4d16-b053-a2bfe8a76757": {
"resources": {
"VCPU": 1
}
}
},
"mappings": {
"_NET1": [
"92e971c9-777a-48bf-a181-a2ca1105c015"
],
"_NET2": [
"cefbdf54-05a8-4db4-ad2b-d6729e5a4de8"
],
"": [
"9a9c6b0f-e8d1-4d16-b053-a2bfe8a76757"
]
}
}
],
"provider_summaries": {
"be99627d-e848-44ef-8341-683e2e557c58": {
"resources": {},
"traits": [
"COMPUTE_VOLUME_MULTI_ATTACH"
],
"parent_provider_uuid": null,
"root_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58"
},
"9a9c6b0f-e8d1-4d16-b053-a2bfe8a76757": {
"resources": {
"VCPU": {
"capacity": 4,
"used": 0
},
"MEMORY_MB": {
"capacity": 2048,
"used": 0
}
},
"traits": [
"HW_NUMA_ROOT",
"CUSTOM_FOO"
],
"parent_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58",
"root_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58"
},
"ba415f98-1960-4488-b2ed-4518b77eaa60": {
"resources": {},
"traits": [
"CUSTOM_VNIC_TYPE_DIRECT"
],
"parent_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58",
"root_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58"
},
"92e971c9-777a-48bf-a181-a2ca1105c015": {
"resources": {
"NET_BW_EGR_KILOBIT_PER_SEC": {
"capacity": 10000,
"used": 0
}
},
"traits": [
"CUSTOM_PHYSNET1"
],
"parent_provider_uuid": "ba415f98-1960-4488-b2ed-4518b77eaa60",
"root_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58"
},
"cefbdf54-05a8-4db4-ad2b-d6729e5a4de8": {
"resources": {
"NET_BW_EGR_KILOBIT_PER_SEC": {
"capacity": 20000,
"used": 0
}
},
"traits": [
"CUSTOM_PHYSNET2"
],
"parent_provider_uuid": "ba415f98-1960-4488-b2ed-4518b77eaa60",
"root_provider_uuid": "be99627d-e848-44ef-8341-683e2e557c58"
}
}
}
get-allocation_candidates.json 0000664 0000000 0000000 00000001526 15132464062 0035363 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocation_candidates {
"allocation_requests": [
{
"allocations": [
{
"resource_provider": {
"uuid": "30742363-f65e-4012-a60a-43e0bec38f0e"
},
"resources": {
"MEMORY_MB": 512
}
}
]
}
],
"provider_summaries": {
"30742363-f65e-4012-a60a-43e0bec38f0e": {
"resources": {
"DISK_GB": {
"capacity": 77,
"used": 0
},
"MEMORY_MB": {
"capacity": 11206,
"used": 256
},
"VCPU": {
"capacity": 64,
"used": 0
}
}
}
}
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations/ 0000775 0000000 0000000 00000000000 15132464062 0025466 5 ustar 00root root 0000000 0000000 get-allocations-1.28.json 0000664 0000000 0000000 00000001014 15132464062 0031751 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"allocations": {
"92637880-2d79-43c6-afab-d860886c6391": {
"generation": 2,
"resources": {
"DISK_GB": 5
}
},
"ba8e1ef8-7fa3-41a4-9bb4-d7cb2019899b": {
"generation": 8,
"resources": {
"MEMORY_MB": 512,
"VCPU": 2
}
}
},
"consumer_generation": 1,
"project_id": "7e67cbf7-7c38-4a32-b85b-0739c690991a",
"user_id": "067f691e-725a-451a-83e2-5c3d13e1dffc"
}
get-allocations-1.38.json 0000664 0000000 0000000 00000001056 15132464062 0031760 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"allocations": {
"92637880-2d79-43c6-afab-d860886c6391": {
"generation": 2,
"resources": {
"DISK_GB": 5
}
},
"ba8e1ef8-7fa3-41a4-9bb4-d7cb2019899b": {
"generation": 8,
"resources": {
"MEMORY_MB": 512,
"VCPU": 2
}
}
},
"consumer_generation": 1,
"project_id": "7e67cbf7-7c38-4a32-b85b-0739c690991a",
"user_id": "067f691e-725a-451a-83e2-5c3d13e1dffc",
"consumer_type": "INSTANCE"
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations/get-allocations.json 0000664 0000000 0000000 00000000756 15132464062 0031456 0 ustar 00root root 0000000 0000000 {
"allocations": {
"92637880-2d79-43c6-afab-d860886c6391": {
"generation": 2,
"resources": {
"DISK_GB": 5
}
},
"ba8e1ef8-7fa3-41a4-9bb4-d7cb2019899b": {
"generation": 8,
"resources": {
"MEMORY_MB": 512,
"VCPU": 2
}
}
},
"project_id": "7e67cbf7-7c38-4a32-b85b-0739c690991a",
"user_id": "067f691e-725a-451a-83e2-5c3d13e1dffc"
}
manage-allocations-request-1.28.json 0000664 0000000 0000000 00000001721 15132464062 0034115 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"30328d13-e299-4a93-a102-61e4ccabe474": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 2,
"MEMORY_MB": 3
},
"generation": 4
}
}
},
"71921e4e-1629-4c5b-bf8d-338d915d2ef3": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {}
},
"48c1d40f-45d8-4947-8d46-52b4e1326df8": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 4,
"MEMORY_MB": 5
},
"generation": 12
}
}
}
}
manage-allocations-request-1.38.json 0000664 0000000 0000000 00000002066 15132464062 0034121 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"30328d13-e299-4a93-a102-61e4ccabe474": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 2,
"MEMORY_MB": 3
},
"generation": 4
}
},
"consumer_type": "INSTANCE"
},
"71921e4e-1629-4c5b-bf8d-338d915d2ef3": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {},
"consumer_type": "MIGRATION"
},
"48c1d40f-45d8-4947-8d46-52b4e1326df8": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 4,
"MEMORY_MB": 5
},
"generation": 12
}
},
"consumer_type": "INSTANCE"
}
}
manage-allocations-request.json 0000664 0000000 0000000 00000001503 15132464062 0033525 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"30328d13-e299-4a93-a102-61e4ccabe474": {
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 2,
"MEMORY_MB": 3
}
}
}
},
"71921e4e-1629-4c5b-bf8d-338d915d2ef3": {
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {}
},
"48c1d40f-45d8-4947-8d46-52b4e1326df8": {
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 4,
"MEMORY_MB": 5
}
}
}
}
}
update-allocations-request-1.12.json 0000664 0000000 0000000 00000000556 15132464062 0034145 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"allocations": {
"4e061c03-611e-4caa-bf26-999dcff4284e": {
"resources": {
"DISK_GB": 20
}
},
"89873422-1373-46e5-b467-f0c5e6acf08f": {
"resources": {
"MEMORY_MB": 1024,
"VCPU": 1
}
}
},
"user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70",
"project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784"
}
update-allocations-request-1.28.json 0000664 0000000 0000000 00000000612 15132464062 0034145 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"allocations": {
"4e061c03-611e-4caa-bf26-999dcff4284e": {
"resources": {
"DISK_GB": 20
}
},
"89873422-1373-46e5-b467-f0c5e6acf08f": {
"resources": {
"MEMORY_MB": 1024,
"VCPU": 1
}
}
},
"consumer_generation": 1,
"user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70",
"project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784"
}
update-allocations-request-1.38.json 0000664 0000000 0000000 00000000652 15132464062 0034152 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"allocations": {
"4e061c03-611e-4caa-bf26-999dcff4284e": {
"resources": {
"DISK_GB": 20
}
},
"89873422-1373-46e5-b467-f0c5e6acf08f": {
"resources": {
"MEMORY_MB": 1024,
"VCPU": 1
}
}
},
"consumer_generation": 1,
"user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70",
"project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784",
"consumer_type": "INSTANCE"
}
update-allocations-request.json 0000664 0000000 0000000 00000001106 15132464062 0033556 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/allocations {
"allocations": [
{
"resource_provider": {
"uuid": "844ac34d-620e-474c-833c-4c9921251353"
},
"resources": {
"MEMORY_MB": 512,
"VCPU": 2
}
},
{
"resource_provider": {
"uuid": "92637880-2d79-43c6-afab-d860886c6391"
},
"resources": {
"DISK_GB": 5
}
}
],
"project_id": "6e3b2ce9-9175-4830-a862-b9de690bdceb",
"user_id": "81c516e3-5e0e-4dcb-9a38-4473d229a950"
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories/ 0000775 0000000 0000000 00000000000 15132464062 0025523 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories/get-inventories.json 0000664 0000000 0000000 00000001252 15132464062 0031540 0 ustar 00root root 0000000 0000000 {
"inventories": {
"DISK_GB": {
"allocation_ratio": 1.0,
"max_unit": 35,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 35
},
"MEMORY_MB": {
"allocation_ratio": 1.5,
"max_unit": 5825,
"min_unit": 1,
"reserved": 512,
"step_size": 1,
"total": 5825
},
"VCPU": {
"allocation_ratio": 16.0,
"max_unit": 4,
"min_unit": 1,
"reserved": 0,
"step_size": 1,
"total": 4
}
},
"resource_provider_generation": 7
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories/get-inventory.json 0000664 0000000 0000000 00000000245 15132464062 0031231 0 ustar 00root root 0000000 0000000 {
"allocation_ratio": 16.0,
"max_unit": 4,
"min_unit": 1,
"reserved": 0,
"resource_provider_generation": 9,
"step_size": 1,
"total": 4
}
update-inventories-request.json 0000664 0000000 0000000 00000000523 15132464062 0033652 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories {
"inventories": {
"MEMORY_MB": {
"allocation_ratio": 2.0,
"max_unit": 16,
"step_size": 4,
"total": 128
},
"VCPU": {
"allocation_ratio": 10.0,
"reserved": 2,
"total": 64
}
},
"resource_provider_generation": 1
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories/update-inventories.json 0000664 0000000 0000000 00000000744 15132464062 0032250 0 ustar 00root root 0000000 0000000 {
"inventories": {
"MEMORY_MB": {
"allocation_ratio": 2.0,
"max_unit": 16,
"min_unit": 1,
"reserved": 0,
"step_size": 4,
"total": 128
},
"VCPU": {
"allocation_ratio": 10.0,
"max_unit": 2147483647,
"min_unit": 1,
"reserved": 2,
"step_size": 1,
"total": 64
}
},
"resource_provider_generation": 2
}
update-inventory-request.json 0000664 0000000 0000000 00000000073 15132464062 0033342 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories {
"resource_provider_generation": 7,
"total": 50
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/inventories/update-inventory.json 0000664 0000000 0000000 00000000256 15132464062 0031736 0 ustar 00root root 0000000 0000000 {
"allocation_ratio": 1.0,
"max_unit": 2147483647,
"min_unit": 1,
"reserved": 0,
"resource_provider_generation": 8,
"step_size": 1,
"total": 50
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/reshaper/ 0000775 0000000 0000000 00000000000 15132464062 0024767 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/reshaper/post-reshaper-1.30.json 0000664 0000000 0000000 00000003243 15132464062 0031037 0 ustar 00root root 0000000 0000000 {
"allocations": {
"9ae60315-80c2-48a0-a168-ca4f27c307e1": {
"allocations": {
"a7466641-cd72-499b-b6c9-c208eacecb3d": {
"resources": {
"DISK_GB": 1000
}
}
},
"project_id": "2f0c4ffc-4c4d-407a-b334-56297b871b7f",
"user_id": "cc8a0fe0-2b7c-4392-ae51-747bc73cf473",
"consumer_generation": 1
},
"4a6444e5-10d6-43f6-9a0b-8acce9309ac9": {
"allocations": {
"c4ddddbb-01ee-4814-85c9-f57a962c22ba": {
"resources": {
"VCPU": 1
}
},
"a7466641-cd72-499b-b6c9-c208eacecb3d": {
"resources": {
"DISK_GB": 20
}
}
},
"project_id": "2f0c4ffc-4c4d-407a-b334-56297b871b7f",
"user_id": "406e1095-71cb-47b9-9b3c-aedb7f663f5a",
"consumer_generation": 1
},
"e10e7ca0-2ac5-4c98-bad9-51c95b1930ed": {
"allocations": {
"c4ddddbb-01ee-4814-85c9-f57a962c22ba": {
"resources": {
"VCPU": 8
}
}
},
"project_id": "2f0c4ffc-4c4d-407a-b334-56297b871b7f",
"user_id": "cc8a0fe0-2b7c-4392-ae51-747bc73cf473",
"consumer_generation": 1
}
},
"inventories": {
"c4ddddbb-01ee-4814-85c9-f57a962c22ba": {
"inventories": {
"VCPU": {
"max_unit": 8,
"total": 10
}
},
"resource_provider_generation": null
},
"a7466641-cd72-499b-b6c9-c208eacecb3d": {
"inventories": {
"DISK_GB": {
"min_unit": 10,
"total": 2048,
"max_unit": 1200,
"step_size": 10
}
},
"resource_provider_generation": 5
}
}
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/reshaper/post-reshaper-1.38.json 0000664 0000000 0000000 00000003415 15132464062 0031050 0 ustar 00root root 0000000 0000000 {
"allocations": {
"9ae60315-80c2-48a0-a168-ca4f27c307e1": {
"allocations": {
"a7466641-cd72-499b-b6c9-c208eacecb3d": {
"resources": {
"DISK_GB": 1000
}
}
},
"project_id": "2f0c4ffc-4c4d-407a-b334-56297b871b7f",
"user_id": "cc8a0fe0-2b7c-4392-ae51-747bc73cf473",
"consumer_generation": 1,
"consumer_type": "INSTANCE"
},
"4a6444e5-10d6-43f6-9a0b-8acce9309ac9": {
"allocations": {
"c4ddddbb-01ee-4814-85c9-f57a962c22ba": {
"resources": {
"VCPU": 1
}
},
"a7466641-cd72-499b-b6c9-c208eacecb3d": {
"resources": {
"DISK_GB": 20
}
}
},
"project_id": "2f0c4ffc-4c4d-407a-b334-56297b871b7f",
"user_id": "406e1095-71cb-47b9-9b3c-aedb7f663f5a",
"consumer_generation": 1,
"consumer_type": "INSTANCE"
},
"e10e7ca0-2ac5-4c98-bad9-51c95b1930ed": {
"allocations": {
"c4ddddbb-01ee-4814-85c9-f57a962c22ba": {
"resources": {
"VCPU": 8
}
}
},
"project_id": "2f0c4ffc-4c4d-407a-b334-56297b871b7f",
"user_id": "cc8a0fe0-2b7c-4392-ae51-747bc73cf473",
"consumer_generation": 1,
"consumer_type": "INSTANCE"
}
},
"inventories": {
"c4ddddbb-01ee-4814-85c9-f57a962c22ba": {
"inventories": {
"VCPU": {
"max_unit": 8,
"total": 10
}
},
"resource_provider_generation": null
},
"a7466641-cd72-499b-b6c9-c208eacecb3d": {
"inventories": {
"DISK_GB": {
"min_unit": 10,
"total": 2048,
"max_unit": 1200,
"step_size": 10
}
},
"resource_provider_generation": 5
}
}
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_classes/ 0000775 0000000 0000000 00000000000 15132464062 0026522 5 ustar 00root root 0000000 0000000 create-resource_classes-request.json 0000664 0000000 0000000 00000000030 15132464062 0035622 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_classes {"name": "CUSTOM_FPGA"}
get-resource_class.json 0000664 0000000 0000000 00000000241 15132464062 0033124 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_classes {
"links": [
{
"href": "/placement/resource_classes/CUSTOM_FPGA",
"rel": "self"
}
],
"name": "CUSTOM_FPGA"
}
get-resource_classes.json 0000664 0000000 0000000 00000004461 15132464062 0033464 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_classes {
"resource_classes": [
{
"links": [
{
"href": "/placement/resource_classes/VCPU",
"rel": "self"
}
],
"name": "VCPU"
},
{
"links": [
{
"href": "/placement/resource_classes/MEMORY_MB",
"rel": "self"
}
],
"name": "MEMORY_MB"
},
{
"links": [
{
"href": "/placement/resource_classes/DISK_GB",
"rel": "self"
}
],
"name": "DISK_GB"
},
{
"links": [
{
"href": "/placement/resource_classes/PCI_DEVICE",
"rel": "self"
}
],
"name": "PCI_DEVICE"
},
{
"links": [
{
"href": "/placement/resource_classes/SRIOV_NET_VF",
"rel": "self"
}
],
"name": "SRIOV_NET_VF"
},
{
"links": [
{
"href": "/placement/resource_classes/NUMA_SOCKET",
"rel": "self"
}
],
"name": "NUMA_SOCKET"
},
{
"links": [
{
"href": "/placement/resource_classes/NUMA_CORE",
"rel": "self"
}
],
"name": "NUMA_CORE"
},
{
"links": [
{
"href": "/placement/resource_classes/NUMA_THREAD",
"rel": "self"
}
],
"name": "NUMA_THREAD"
},
{
"links": [
{
"href": "/placement/resource_classes/NUMA_MEMORY_MB",
"rel": "self"
}
],
"name": "NUMA_MEMORY_MB"
},
{
"links": [
{
"href": "/placement/resource_classes/IPV4_ADDRESS",
"rel": "self"
}
],
"name": "IPV4_ADDRESS"
}
]
}
update-resource_class-request.json 0000664 0000000 0000000 00000000033 15132464062 0035314 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_classes {"name": "CUSTOM_FPGA_V2"}
update-resource_class.json 0000664 0000000 0000000 00000000247 15132464062 0033635 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_classes {
"links": [
{
"href": "/placement/resource_classes/CUSTOM_FPGA_V2",
"rel": "self"
}
],
"name": "CUSTOM_FPGA_V2"
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_allocations/ 0000775 0000000 0000000 00000000000 15132464062 0031307 5 ustar 00root root 0000000 0000000 get-resource_provider_allocations.json 0000664 0000000 0000000 00000001055 15132464062 0041032 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_allocations {
"allocations": {
"56785a3f-6f1c-4fec-af0b-0faf075b1fcb": {
"resources": {
"MEMORY_MB": 256,
"VCPU": 1
}
},
"9afd5aeb-d6b9-4dea-a588-1e6327a91834": {
"resources": {
"MEMORY_MB": 512,
"VCPU": 2
}
},
"9d16a611-e7f9-4ef3-be26-c61ed01ecefb": {
"resources": {
"MEMORY_MB": 1024,
"VCPU": 1
}
}
},
"resource_provider_generation": 12
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_traits/ 0000775 0000000 0000000 00000000000 15132464062 0030305 5 ustar 00root root 0000000 0000000 get-resource_provider-traits.json 0000664 0000000 0000000 00000000202 15132464062 0036735 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_traits {
"resource_provider_generation": 1,
"traits": [
"CUSTOM_HW_FPGA_CLASS1",
"CUSTOM_HW_FPGA_CLASS3"
]
}
update-resource_provider-traits-request.json 0000664 0000000 0000000 00000000202 15132464062 0041126 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_traits {
"resource_provider_generation": 0,
"traits": [
"CUSTOM_HW_FPGA_CLASS1",
"CUSTOM_HW_FPGA_CLASS3"
]
}
update-resource_provider-traits.json 0000664 0000000 0000000 00000000202 15132464062 0037440 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_traits {
"resource_provider_generation": 1,
"traits": [
"CUSTOM_HW_FPGA_CLASS1",
"CUSTOM_HW_FPGA_CLASS3"
]
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_usages/ 0000775 0000000 0000000 00000000000 15132464062 0030266 5 ustar 00root root 0000000 0000000 get-resource_provider_usages.json 0000664 0000000 0000000 00000000203 15132464062 0036762 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_provider_usages {
"resource_provider_generation": 1,
"usages": {
"DISK_GB": 1,
"MEMORY_MB": 512,
"VCPU": 1
}
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers/ 0000775 0000000 0000000 00000000000 15132464062 0027102 5 ustar 00root root 0000000 0000000 create-resource_provider.json 0000664 0000000 0000000 00000002177 15132464062 0034727 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers {
"generation": 0,
"links": [
{
"href": "/placement/resource_providers/7d2590ae-fb85-4080-9306-058b4c915e3f",
"rel": "self"
},
{
"href": "/placement/resource_providers/7d2590ae-fb85-4080-9306-058b4c915e3f/aggregates",
"rel": "aggregates"
},
{
"href": "/placement/resource_providers/7d2590ae-fb85-4080-9306-058b4c915e3f/inventories",
"rel": "inventories"
},
{
"href": "/placement/resource_providers/7d2590ae-fb85-4080-9306-058b4c915e3f/usages",
"rel": "usages"
},
{
"href": "/placement/resource_providers/7d2590ae-fb85-4080-9306-058b4c915e3f/traits",
"rel": "traits"
},
{
"href": "/placement/resource_providers/7d2590ae-fb85-4080-9306-058b4c915e3f/allocations",
"rel": "allocations"
}
],
"name": "NFS Share",
"uuid": "7d2590ae-fb85-4080-9306-058b4c915e3f",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8",
"root_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8"
}
create-resource_providers-request.json 0000664 0000000 0000000 00000000224 15132464062 0036567 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers {
"name": "NFS Share",
"uuid": "7d2590ae-fb85-4080-9306-058b4c915e3f",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8"
}
get-resource_provider.json 0000664 0000000 0000000 00000002207 15132464062 0034235 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers {
"generation": 0,
"links": [
{
"href": "/placement/resource_providers/3b4005be-d64b-456f-ba36-0ffd02718868",
"rel": "self"
},
{
"href": "/placement/resource_providers/3b4005be-d64b-456f-ba36-0ffd02718868/aggregates",
"rel": "aggregates"
},
{
"href": "/placement/resource_providers/3b4005be-d64b-456f-ba36-0ffd02718868/inventories",
"rel": "inventories"
},
{
"href": "/placement/resource_providers/3b4005be-d64b-456f-ba36-0ffd02718868/usages",
"rel": "usages"
},
{
"href": "/placement/resource_providers/3b4005be-d64b-456f-ba36-0ffd02718868/traits",
"rel": "traits"
},
{
"href": "/placement/resource_providers/3b4005be-d64b-456f-ba36-0ffd02718868/allocations",
"rel": "allocations"
}
],
"name": "Ceph Storage Pool",
"uuid": "3b4005be-d64b-456f-ba36-0ffd02718868",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8",
"root_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8"
}
get-resource_providers.json 0000664 0000000 0000000 00000004205 15132464062 0034420 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers {
"resource_providers": [
{
"generation": 1,
"uuid": "99c09379-6e52-4ef8-9a95-b9ce6f68452e",
"links": [
{
"href": "/resource_providers/99c09379-6e52-4ef8-9a95-b9ce6f68452e",
"rel": "self"
},
{
"href": "/resource_providers/99c09379-6e52-4ef8-9a95-b9ce6f68452e/aggregates",
"rel": "aggregates"
},
{
"href": "/resource_providers/99c09379-6e52-4ef8-9a95-b9ce6f68452e/inventories",
"rel": "inventories"
},
{
"href": "/resource_providers/99c09379-6e52-4ef8-9a95-b9ce6f68452e/usages",
"rel": "usages"
},
{
"href": "/resource_providers/99c09379-6e52-4ef8-9a95-b9ce6f68452e/traits",
"rel": "traits"
},
{
"href": "/resource_providers/99c09379-6e52-4ef8-9a95-b9ce6f68452e/allocations",
"rel": "allocations"
}
],
"name": "vgr.localdomain",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8",
"root_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8"
},
{
"generation": 2,
"uuid": "d0b381e9-8761-42de-8e6c-bba99a96d5f5",
"links": [
{
"href": "/resource_providers/d0b381e9-8761-42de-8e6c-bba99a96d5f5",
"rel": "self"
},
{
"href": "/resource_providers/d0b381e9-8761-42de-8e6c-bba99a96d5f5/aggregates",
"rel": "aggregates"
},
{
"href": "/resource_providers/d0b381e9-8761-42de-8e6c-bba99a96d5f5/inventories",
"rel": "inventories"
},
{
"href": "/resource_providers/d0b381e9-8761-42de-8e6c-bba99a96d5f5/usages",
"rel": "usages"
},
{
"href": "/resource_providers/d0b381e9-8761-42de-8e6c-bba99a96d5f5/traits",
"rel": "traits"
},
{
"href": "/resource_providers/d0b381e9-8761-42de-8e6c-bba99a96d5f5/allocations",
"rel": "allocations"
}
],
"name": "pony1",
"parent_provider_uuid": null,
"root_provider_uuid": "d0b381e9-8761-42de-8e6c-bba99a96d5f5"
}
]
}
update-resource_provider-request.json 0000664 0000000 0000000 00000000151 15132464062 0036422 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers {
"name": "Shared storage",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8"
}
update-resource_provider.json 0000664 0000000 0000000 00000002204 15132464062 0034735 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/resource_providers {
"generation": 0,
"links": [
{
"href": "/placement/resource_providers/33f26ae0-dbf2-485b-a24a-244d8280e29f",
"rel": "self"
},
{
"href": "/placement/resource_providers/33f26ae0-dbf2-485b-a24a-244d8280e29f/aggregates",
"rel": "aggregates"
},
{
"href": "/placement/resource_providers/33f26ae0-dbf2-485b-a24a-244d8280e29f/inventories",
"rel": "inventories"
},
{
"href": "/placement/resource_providers/33f26ae0-dbf2-485b-a24a-244d8280e29f/usages",
"rel": "usages"
},
{
"href": "/placement/resource_providers/33f26ae0-dbf2-485b-a24a-244d8280e29f/traits",
"rel": "traits"
},
{
"href": "/placement/resource_providers/33f26ae0-dbf2-485b-a24a-244d8280e29f/allocations",
"rel": "allocations"
}
],
"name": "Shared storage",
"uuid": "33f26ae0-dbf2-485b-a24a-244d8280e29f",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8",
"root_provider_uuid": "d0b381e9-8761-42de-8e6c-bba99a96d5f5"
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/root/ 0000775 0000000 0000000 00000000000 15132464062 0024141 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/root/get-root.json 0000664 0000000 0000000 00000000477 15132464062 0026604 0 ustar 00root root 0000000 0000000 {
"versions" : [
{
"min_version" : "1.0",
"id" : "v1.0",
"max_version" : "1.28",
"status": "CURRENT",
"links": [
{
"href": "",
"rel": "self"
}
]
}
]
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/traits/ 0000775 0000000 0000000 00000000000 15132464062 0024464 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/traits/get-traits.json 0000664 0000000 0000000 00000000174 15132464062 0027444 0 ustar 00root root 0000000 0000000 {
"traits": [
"CUSTOM_HW_FPGA_CLASS1",
"CUSTOM_HW_FPGA_CLASS2",
"CUSTOM_HW_FPGA_CLASS3"
]
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/usages/ 0000775 0000000 0000000 00000000000 15132464062 0024445 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/usages/get-usages-1.38.json 0000664 0000000 0000000 00000000647 15132464062 0030002 0 ustar 00root root 0000000 0000000 {
"usages" : {
"INSTANCE" : {
"consumer_count" : 5,
"MEMORY_MB" : 512,
"VCPU" : 2,
"DISK_GB" : 5
},
"MIGRATION" : {
"DISK_GB" : 5,
"VCPU" : 2,
"consumer_count" : 2,
"MEMORY_MB" : 512
},
"unknown" : {
"VCPU" : 2,
"DISK_GB" : 5,
"consumer_count" : 1,
"MEMORY_MB" : 512
}
}
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/samples/usages/get-usages.json 0000664 0000000 0000000 00000000134 15132464062 0027402 0 ustar 00root root 0000000 0000000 {
"usages": {
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/traits.inc 0000664 0000000 0000000 00000005662 15132464062 0023524 0 ustar 00root root 0000000 0000000 ======
Traits
======
Traits are *qualitative* characteristics of resource providers.
The classic example for traits can be requesting disk from different
providers: a user may request 80GiB of disk space for an instance
(quantitative), but may also expect that the disk be SSD instead of
spinning disk (qualitative). Traits provide a way to mark that a
storage provider is SSD or spinning.
.. note:: Traits API requests are available starting from version 1.6.
List traits
===========
Return a list of valid trait strings according to parameters specified.
.. rest_method:: GET /traits
Normal Response Codes: 200
Request
-------
Several query parameters are available to filter the returned list of
traits. If multiple different parameters are provided, the results
of all filters are merged with a boolean `AND`.
.. rest_parameters:: parameters.yaml
- name: trait_name_query
- associated: trait_associated
Response
--------
.. rest_parameters:: parameters.yaml
- traits: traits
Response Example
----------------
.. literalinclude:: ./samples/traits/get-traits.json
:language: javascript
Show traits
===========
Check if a trait name exists in this cloud.
.. rest_method:: GET /traits/{name}
Normal Response Codes: 204
Error response codes: itemNotFound(404)
Request
-------
.. rest_parameters:: parameters.yaml
- name: trait_name
Response
--------
No body content is returned on a successful GET.
Update traits
=============
Insert a new custom trait. If traits already exists 204 will be returned.
There are two kinds of traits: the standard traits and the custom traits.
The standard traits are interoperable across different OpenStack cloud
deployments. The definition of standard traits comes from the `os-traits`
library. The standard traits are read-only in the placement API which means
that the user can't modify any standard traits through API.
The custom traits are used by admin users to manage the non-standard
qualitative information of resource providers.
.. rest_method:: PUT /traits/{name}
Normal Response Codes: 201, 204
Error response codes: badRequest(400)
* `400 BadRequest` if trait name is not prefixed with `CUSTOM_` prefix.
Request
-------
.. rest_parameters:: parameters.yaml
- name: trait_name
Response
--------
.. rest_parameters:: parameters.yaml
- Location: location
No body content is returned on a successful PUT.
Delete traits
=============
Delete the trait specified be `{name}`. Note that only custom traits can be
deleted.
.. rest_method:: DELETE /traits/{name}
Normal Response Codes: 204
Error response codes: badRequest(400), itemNotFound(404), conflict(409)
* `400 BadRequest` if the name to delete is standard trait.
* `404 Not Found` if no such trait exists.
* `409 Conflict` if the name to delete has associations with any
ResourceProvider.
Request
-------
.. rest_parameters:: parameters.yaml
- name: trait_name
Response
--------
No body content is returned on a successful DELETE.
placement-14.0.0+git20260116.35.cd24dcb5/api-ref/source/usages.inc 0000664 0000000 0000000 00000002617 15132464062 0023502 0 ustar 00root root 0000000 0000000 ======
Usages
======
Represent the consumption of resources for a project and user.
.. note:: Usages API requests are available starting from version 1.9.
List usages
===========
Return a report of usage information for resources associated with the
project identified by `project_id` and user identified by
`user_id`. The value is a dictionary of resource classes paired with
the sum of the allocations of that resource class for provided
parameters.
.. rest_method:: GET /usages
Normal Response Codes: 200
Error response codes: badRequest(400)
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id
- user_id: user_id
- consumer_type: consumer_type_req
Response (microversions 1.38 - )
--------------------------------
.. rest_parameters:: parameters.yaml
- usages.consumer_type: consumer_type
- usages.consumer_type.consumer_count: consumer_count
- usages.consumer_type.RESOURCE_CLASS: resources_single
Response Example (microversions 1.38 - )
----------------------------------------
.. literalinclude:: ./samples/usages/get-usages-1.38.json
:language: javascript
Response (microversions 1.9 - 1.36)
-----------------------------------
.. rest_parameters:: parameters.yaml
- usages: resources
Response Example (microversions 1.9 - 1.36)
-------------------------------------------
.. literalinclude:: ./samples/usages/get-usages.json
:language: javascript
placement-14.0.0+git20260116.35.cd24dcb5/bindep.txt 0000664 0000000 0000000 00000002541 15132464062 0020673 0 ustar 00root root 0000000 0000000 # This is a cross-platform list tracking distribution packages needed for install and tests;
# see https://docs.openstack.org/infra/bindep/ for additional information.
build-essential [platform:dpkg test]
gcc [platform:rpm test]
# gettext and graphviz are needed by doc builds only. For transition,
# have them in both doc and test.
# TODO(jaegerandi): Remove test once infra scripts are updated.
gettext [doc test]
graphviz [doc test]
language-pack-en [platform:ubuntu]
libffi-dev [platform:dpkg test]
libffi-devel [platform:rpm test]
libmysqlclient-dev [platform:ubuntu]
libmariadb-dev-compat [platform:debian]
libpq-dev [platform:dpkg test]
libsqlite3-dev [platform:dpkg test]
libxml2-dev [platform:dpkg test]
libxslt-devel [platform:rpm test]
libxslt1-dev [platform:dpkg test]
locales [platform:debian]
mysql [platform:rpm]
mysql-client [platform:dpkg !platform:debian]
mysql-devel [platform:rpm test]
mysql-server [!platform:debian]
mariadb-server [platform:debian]
pkg-config [platform:dpkg test]
pkgconfig [platform:rpm test]
postgresql
postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm test]
postgresql-server [platform:rpm]
python3-all [platform:dpkg test]
python3-all-dev [platform:dpkg test]
python3 [platform:rpm test]
python3-devel [platform:rpm test]
sqlite-devel [platform:rpm test]
libpcre3-dev [platform:dpkg doc]
pcre-devel [platform:rpm doc]
placement-14.0.0+git20260116.35.cd24dcb5/doc/ 0000775 0000000 0000000 00000000000 15132464062 0017434 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/README.rst 0000664 0000000 0000000 00000000555 15132464062 0021130 0 ustar 00root root 0000000 0000000 OpenStack Placement Documentation README
========================================
Configuration, contributor, install, and usage documentation
is sourced here and built to:
https://docs.openstack.org/placement/latest/
Note that the Placement API reference is maintained under
the ``/api-ref`` directory and built to:
https://docs.openstack.org/api-ref/placement/
placement-14.0.0+git20260116.35.cd24dcb5/doc/requirements.txt 0000664 0000000 0000000 00000000600 15132464062 0022714 0 ustar 00root root 0000000 0000000 sphinx>=2.1.1 # BSD
sphinxcontrib-actdiag>=0.8.5 # BSD
sphinxcontrib-seqdiag>=0.8.4 # BSD
sphinx-feature-classification>=0.2.0 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0
openstackdocstheme>=2.2.1 # Apache-2.0
# releasenotes
reno>=3.1.0 # Apache-2.0
# redirect tests in docs
whereto>=0.3.0 # Apache-2.0
# needed to generate osprofiler config options
osprofiler>=1.4.0 # Apache-2.0
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/ 0000775 0000000 0000000 00000000000 15132464062 0020734 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/_extra/ 0000775 0000000 0000000 00000000000 15132464062 0022216 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/_extra/.htaccess 0000664 0000000 0000000 00000001413 15132464062 0024013 0 ustar 00root root 0000000 0000000 redirectmatch 301 ^/placement/([^/]+)/specs/train/approved/2005297-negative-aggregate-membership.html /placement/$1/specs/train/implemented/2005297-negative-aggregate-membership.html
redirectmatch 301 ^/placement/([^/]+)/specs/train/approved/placement-resource-provider-request-group-mapping-in-allocation-candidates.html /placement/$1/specs/train/implemented/placement-resource-provider-request-group-mapping-in-allocation-candidates.html
redirectmatch 301 ^/placement/([^/]+)/specs/train/approved/2005575-nested-magic-1.html /placement/$1/specs/train/implemented/2005575-nested-magic-1.html
redirectmatch 301 ^/placement/([^/]+)/usage/index.html /placement/$1/user/index.html
redirectmatch 301 ^/placement/([^/]+)/usage/provider-tree.html /placement/$1/user/provider-tree.html
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/_static/ 0000775 0000000 0000000 00000000000 15132464062 0022362 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/_static/.placeholder 0000664 0000000 0000000 00000000256 15132464062 0024650 0 ustar 00root root 0000000 0000000 Sphinx 2.2.0 gets upset when a directory it is configured for does not exist.
This directory is only used for automatically generated configuration and
policy sample files.
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/admin/ 0000775 0000000 0000000 00000000000 15132464062 0022024 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/admin/index.rst 0000664 0000000 0000000 00000001206 15132464062 0023664 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Upgrade
=======
.. toctree::
:maxdepth: 2
upgrade-notes
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/admin/upgrade-notes.rst 0000664 0000000 0000000 00000001613 15132464062 0025334 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
=============
Upgrade Notes
=============
This section provide notes on upgrading to a given target release.
.. note::
As a reminder, the
:ref:`placement-status upgrade check ` tool can be
used to help determine the status of your deployment and how ready it is to
perform an upgrade.
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/cli/ 0000775 0000000 0000000 00000000000 15132464062 0021503 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/cli/index.rst 0000664 0000000 0000000 00000001417 15132464062 0023347 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Command-line Utilities
======================
In this section you will find information on placement's command line
utilities:
.. toctree::
:maxdepth: 1
placement-manage
placement-status
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/cli/placement-manage.rst 0000664 0000000 0000000 00000010521 15132464062 0025432 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
================
placement-manage
================
Synopsis
========
::
placement-manage
Description
===========
:program:`placement-manage` is used to perform administrative tasks with the
placement service. It is designed for use by operators and deployers.
Options
=======
The standard pattern for executing a ``placement-manage`` command is::
placement-manage [-h] [--config-dir DIR] [--config-file PATH]
[]
Run without arguments to see a list of available command categories::
placement-manage
You can also run with a category argument such as ``db`` to see a list of all
commands in that category::
placement-manage db
Configuration options (for example the ``[placement_database]/connection``
URL) are by default found in a file at ``/etc/placement/placement.conf``. The
``config-dir`` and ``config-file`` arguments may be used to select a different
file.
The following sections describe the available categories and arguments for
placement-manage.
Placement Database
~~~~~~~~~~~~~~~~~~
``placement-manage db version``
Print the current database version.
``placement-manage db sync``
Upgrade the database schema to the most recent version. The local database
connection is determined by ``[placement_database]/connection`` in the
configuration file used by placement-manage. If the ``connection`` option
is not set, the command will fail. The defined database must already exist.
``placement-manage db stamp ``
Stamp the revision table with the given revision; don’t run any migrations.
This can be used when the database already exists and you want to bring it
under alembic control.
``placement-manage db online_data_migrations [--max-count]``
Perform data migration to update all live data.
``--max-count`` controls the maximum number of objects to migrate in a given
call. If not specified, migration will occur in batches of 50 until fully
complete.
Returns exit code 0 if no (further) updates are possible, 1 if the
``--max-count`` option was used and some updates were completed successfully
(even if others generated errors), 2 if some updates generated errors and no
other migrations were able to take effect in the last batch attempted, or
127 if invalid input is provided (e.g. non-numeric max-count).
This command should be called after upgrading database schema and placement
services on all controller nodes. If it exits with partial updates (exit
status 1) it should be called again, even if some updates initially
generated errors, because some updates may depend on others having
completed. If it exits with status 2, intervention is required to resolve
the issue causing remaining updates to fail. It should be considered
successfully completed only when the exit status is 0.
For example::
$ placement-manage db online_data_migrations
Running batches of 50 until complete
2 rows matched query create_incomplete_consumers, 2 migrated
+---------------------------------------------+-------------+-----------+
| Migration | Total Found | Completed |
+---------------------------------------------+-------------+-----------+
| set_root_provider_ids | 0 | 0 |
| create_incomplete_consumers | 2 | 2 |
+---------------------------------------------+-------------+-----------+
In the above example, the ``create_incomplete_consumers`` migration
found two candidate records which required a data migration. Since
``--max-count`` defaults to 50 and only two records were migrated with no
more candidates remaining, the command completed successfully with exit
code 0.
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/cli/placement-status.rst 0000664 0000000 0000000 00000005215 15132464062 0025531 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
================
placement-status
================
Synopsis
========
::
placement-status []
Description
===========
:program:`placement-status` is a tool that provides routines for checking the
status of a Placement deployment.
Options
=======
The standard pattern for executing a :program:`placement-status` command is::
placement-status []
Run without arguments to see a list of available command categories::
placement-status
Categories are:
* ``upgrade``
Detailed descriptions are below.
You can also run with a category argument such as ``upgrade`` to see a list of
all commands in that category::
placement-status upgrade
These sections describe the available categories and arguments for
:program:`placement-status`.
Upgrade
~~~~~~~
.. _placement-status-checks:
``placement-status upgrade check``
Performs a release-specific readiness check before restarting services with
new code. This command expects to have complete configuration and access
to databases and services.
**Return Codes**
.. list-table::
:widths: 20 80
:header-rows: 1
* - Return code
- Description
* - 0
- All upgrade readiness checks passed successfully and there is nothing
to do.
* - 1
- At least one check encountered an issue and requires further
investigation. This is considered a warning but the upgrade may be OK.
* - 2
- There was an upgrade status check failure that needs to be
investigated. This should be considered something that stops an
upgrade.
* - 255
- An unexpected error occurred.
**History of Checks**
**1.0.0 (Stein)**
* Checks were added for incomplete consumers and missing root provider ids
both of which can be remedied by running the
``placement-manage db online_data_migrations`` command.
**2.0.0 (Train)**
* The ``Missing Root Provider IDs`` upgrade check will now result in a
failure if there are still ``resource_providers`` records with a null
``root_provider_id`` value.
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/conf.py 0000664 0000000 0000000 00000010702 15132464062 0022233 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# placement documentation build configuration file
#
# Refer to the Sphinx documentation for advice on configuring this file:
#
# http://www.sphinx-doc.org/en/stable/config.html
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE: insert(0, ...) order matters — the last insert ends up first on
# sys.path, so './' takes precedence over '../' and '../../'.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TODO(efried): Trim this moar
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'openstackdocstheme',
'sphinx.ext.coverage',
'sphinx.ext.graphviz',
'sphinx_feature_classification.support_matrix',
'oslo_config.sphinxconfiggen',
'oslo_config.sphinxext',
'oslo_policy.sphinxpolicygen',
'oslo_policy.sphinxext',
'sphinxcontrib.actdiag',
'sphinxcontrib.seqdiag',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/placement'
openstackdocs_pdf_link = True
openstackdocs_use_storyboard = True
# oslo_config.sphinxconfiggen inputs: generator config file and the basename
# (relative to the doc source dir) for the generated sample config.
config_generator_config_file = '../../etc/placement/config-generator.conf'
sample_config_basename = '_static/placement'
# oslo_policy.sphinxpolicygen input: (generator config, output basename) pairs.
policy_generator_config_file = [
('../../etc/placement/policy-generator.conf',
'_static/placement')
]
# Render actdiag/seqdiag diagrams as antialiased SVG.
actdiag_html_image_format = 'SVG'
actdiag_antialias = True
seqdiag_html_image_format = 'SVG'
seqdiag_antialias = True
# Show .. todo:: directive content in the built documentation.
todo_include_todos = True
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = '2010-present, OpenStack Foundation'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['placement.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'openstackdocs'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_extra_path = ['_extra']
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'doc-placement.tex', 'Placement Documentation',
'OpenStack Foundation', 'manual'),
]
latex_domain_indices = False
latex_elements = {
'makeindex': '',
'printindex': '',
'preamble': r'\setcounter{tocdepth}{3}',
'maxlistdepth': '10',
}
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False
# Disable smartquotes, they don't work in latex
smartquotes_excludes = {'builders': ['latex']}
# -- Options for openstackdocstheme -------------------------------------------
# keep this ordered to keep mriedem happy
openstackdocs_projects = [
'neutron',
'nova',
'oslo.versionedobjects',
'placement',
'reno',
]
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/configuration/ 0000775 0000000 0000000 00000000000 15132464062 0023603 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/configuration/config.rst 0000664 0000000 0000000 00000000455 15132464062 0025606 0 ustar 00root root 0000000 0000000 =====================
Configuration Options
=====================
The following is an overview of all available configuration options in
Placement. For a sample configuration file, refer to
:doc:`/configuration/sample-config`.
.. show-options::
:config-file: etc/placement/config-generator.conf
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/configuration/index.rst 0000664 0000000 0000000 00000002434 15132464062 0025447 0 ustar 00root root 0000000 0000000 ===================
Configuration Guide
===================
The static configuration for Placement lives in two main files: ``placement.conf`` and
``policy.yaml``. These are described below.
Configuration
-------------
* :doc:`Config Reference </configuration/config>`: A complete reference of all
  configuration options available in the ``placement.conf`` file.
* :doc:`Sample Config File </configuration/sample-config>`: A sample config
  file with inline documentation.
.. TODO(efried):: Get this working
* :nova-doc:`Configuration Guide <admin/configuration/index>`: Detailed
  configuration guides for various parts of your Nova system. Helpful reference
  for setting up specific hypervisor backends.
Policy
------
Placement, like most OpenStack projects, uses a policy language to restrict
permissions on REST API actions.
* :doc:`Policy Reference </configuration/policy>`: A complete
  reference of all policy points in placement and what they impact.
* :doc:`Sample Policy File </configuration/sample-policy>`: A sample
  placement policy file with inline documentation.
.. # NOTE(mriedem): This is the section where we hide things that we don't
# actually want in the table of contents but sphinx build would fail if
# they aren't in the toctree somewhere.
.. toctree::
:hidden:
policy
sample-policy
config
sample-config
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/configuration/policy.rst 0000664 0000000 0000000 00000001137 15132464062 0025636 0 ustar 00root root 0000000 0000000 ==================
Placement Policies
==================
.. warning::
JSON formatted policy file is deprecated since Placement 5.0.0 (Wallaby).
The `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing
JSON-formatted policy file to YAML in a backward-compatible way.
.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html
The following is an overview of all available policies in Placement.
For a sample configuration file, refer to
:doc:`/configuration/sample-policy`.
.. show-policy::
:config-file: etc/placement/policy-generator.conf
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/configuration/sample-config.rst 0000664 0000000 0000000 00000001155 15132464062 0027063 0 ustar 00root root 0000000 0000000 =========================
Sample Configuration File
=========================
The following is a sample Placement configuration for adaptation and use. For a
detailed overview of all available configuration options, refer to
:doc:`/configuration/config`.
The sample configuration can also be viewed in :download:`file form
</_static/placement.conf.sample>`.
.. important::
The sample configuration file is auto-generated from placement when this
documentation is built. You must ensure your version of placement matches
the version of this documentation.
.. literalinclude:: /_static/placement.conf.sample
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/configuration/sample-policy.rst 0000664 0000000 0000000 00000001526 15132464062 0027117 0 ustar 00root root 0000000 0000000 ============================
Sample Placement Policy File
============================
.. warning::
JSON formatted policy file is deprecated since Placement 5.0.0 (Wallaby).
The `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing
JSON-formatted policy file to YAML in a backward-compatible way.
.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html
The following is a sample placement policy file for adaptation and use.
The sample policy can also be viewed in :download:`file form
</_static/placement.policy.yaml.sample>`.
.. important::
The sample policy file is auto-generated from placement when this
documentation is built. You must ensure your version of placement matches
the version of this documentation.
.. literalinclude:: /_static/placement.policy.yaml.sample
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/ 0000775 0000000 0000000 00000000000 15132464062 0023306 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/api-ref-guideline.rst 0000664 0000000 0000000 00000012345 15132464062 0027333 0 ustar 00root root 0000000 0000000 =======================
API reference guideline
=======================
The API reference should be updated when placement APIs are modified
(microversion is bumped, etc.).
This page describes the guideline for updating the API reference.
API reference
=============
* `Placement API reference <https://docs.openstack.org/api-ref/placement/>`_
The guideline to write the API reference
========================================
The API reference consists of the following files.
* API reference text: ``api-ref/source/*.inc``
* Parameter definition: ``api-ref/source/parameters.yaml``
* JSON request/response samples: ``api-ref/source/samples/*``
Structure of inc file
---------------------
Each REST API is described in the text file (\*.inc).
The structure of inc file is as follows:
- Title
- API Name
- REST Method
- URL
- Description
- Normal status code
- Error status code
- Request
- Parameters
- JSON request body example (if exists)
- Response
- Parameters
- JSON response body example (if exists)
- API Name (Next)
- ...
REST Method
-----------
The guideline for describing HTTP methods is described in this section.
All supported methods by resource should be listed in the API reference.
The order of methods
~~~~~~~~~~~~~~~~~~~~
Methods have to be sorted by each URI in the following order:
1. GET
2. POST
3. PUT
4. PATCH (unused by Nova)
5. DELETE
And sorted from broadest to narrowest. So for /servers it would be:
1. GET /servers
2. POST /servers
3. GET /servers/details
4. GET /servers/{server_id}
5. PUT /servers/{server_id}
6. DELETE /servers/{server_id}
Method titles spelling and case
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The spelling and the case of method names in the title have to match
what is in the code. For instance, the title for the section on method
"Get Rdp Console" should be "Get Rdp Console (os-getRDPConsole Action)"
NOT "Get Rdp Console (Os-Getrdpconsole Action)"
Response codes
~~~~~~~~~~~~~~
The normal response codes (20x) and error response codes
have to be listed. The order of response codes should be in ascending order.
The description of typical error response codes are as follows:
.. list-table:: Error response codes
:header-rows: 1
* - Response codes
- Description
* - 400
- badRequest(400)
* - 401
- unauthorized(401)
* - 403
- forbidden(403)
* - 404
- itemNotFound(404)
* - 409
- conflict(409)
* - 410
- gone(410)
* - 501
- notImplemented(501)
* - 503
- serviceUnavailable(503)
Parameters
----------
Parameters need to be defined by 2 subsections.
The one is in the 'Request' subsection, the other is in the 'Response'
subsection. The queries, request headers and attributes go in the 'Request'
subsection and response headers and attributes go in the 'Response'
subsection.
The order of parameters in each API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The request and response parameters have to be listed in the following order
in each API in the text file.
1. Header
2. Path
3. Query
4. Body
a. Top level object (i.e. server)
b. Required fields
c. Optional fields
d. Parameters added in microversions (by the microversion they were added)
Parameter type
~~~~~~~~~~~~~~
The parameters are defined in the parameter file (``parameters.yaml``).
The type of parameters have to be one of followings:
* ``array``
It is a list.
* ``boolean``
* ``float``
* ``integer``
* ``none``
The value is always ``null`` in a response or
should be ``null`` in a request.
* ``object``
The value is dict.
* ``string``
If the value can be specified by multiple types, specify one type
in the file and mention the other types in the description.
Required or optional
~~~~~~~~~~~~~~~~~~~~
In the parameter file, define the ``required`` field in each parameter.
.. list-table::
:widths: 15 85
* - ``true``
- The parameter must be specified in the request, or
the parameter always appears in the response.
* - ``false``
- It is not always necessary to specify the parameter in the request, or
the parameter does not appear in the response in some cases.
e.g. A config option defines whether the parameter appears
in the response or not. A parameter appears when administrators call
but does not appear when non-admin users call.
If a parameter must be specified in the request or always appears
in the response in the microversion added or later,
the parameter must be defined as required (``true``).
The order of parameters in the parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The order of parameters in the parameter file has to be kept as follows:
1. By in type
a. Header
b. Path
c. Query
d. Body
2. Then alphabetical by name
Example
-------
.. TODO::
The guideline for request/response JSON bodies should be added.
Body
----
.. TODO::
The guideline for the introductory text and the context for the resource
in question should be added.
Reference
=========
* `The description for Parameters whose values are null `_
* `The definition of "Optional" parameter `_
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/architecture.rst 0000664 0000000 0000000 00000041634 15132464062 0026532 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
==============
Architecture
==============
The placement service is straightforward: It is a `WSGI`_ application that
sends and receives JSON, using an RDBMS (usually MySQL) for persistence.
As state is managed solely in the DB, scaling the placement service is done by
increasing the number of WSGI application instances and scaling the RDBMS using
traditional database scaling techniques.
For sake of consistency and because there was initially intent to make the
entities in the placement service available over RPC,
:oslo.versionedobjects-doc:`versioned objects <>` were used to provide the
interface between the HTTP application layer and the SQLAlchemy-driven
persistence layer. In the Stein release, that interface was refactored to
remove the use of versioned objects and split functionality into smaller
modules.
Though the placement service does not aspire to be a *microservice* it does
aspire to continue to be small and minimally complex. This means a relatively
small amount of middleware that is not configurable, and a limited number of
exposed resources where any given resource is represented by one (and only
one) URL that expresses a noun that is a member of the system. Adding
additional resources should be considered a significant change requiring robust
review from many stakeholders.
The set of HTTP resources represents a concise and constrained grammar for
expressing the management of resource providers, inventories, resource classes,
traits, and allocations. If a solution is initially designed to need more
resources or a more complex grammar that may be a sign that we need to give our
goals greater scrutiny. Is there a way to do what we want with what we have
already? Can some other service help? Is a new collaborating service required?
Minimal Framework
=================
The API is set up to use a minimal framework that tries to keep the structure
of the application as discoverable as possible and keeps the HTTP interaction
near the surface. The goal of this is to make things easy to trace when
debugging or adding functionality.
Functionality which is required for every request is handled in raw WSGI
middleware that is composed in the ``placement.deploy`` module. Dispatch or
routing is handled declaratively via the ``ROUTE_DECLARATIONS`` map defined in
the ``placement.handler`` module.
Mapping is by URL plus request method. The destination is a complete WSGI
application, using a subclass of the `wsgify`_ method from `WebOb`_ to provide
a `Request`_ object that provides convenience methods for accessing request
headers, bodies, and query parameters and for generating responses. In the
placement API these mini-applications are called *handlers*. The ``wsgify``
subclass is provided in ``placement.wsgi_wrapper`` as ``PlacementWsgify``. It is
used to make sure that JSON formatted error responses are structured according
to the API-SIG `errors`_ guideline.
This division between middleware, dispatch and handlers is supposed to
provide clues on where a particular behavior or functionality should be
implemented. Like most such systems, this does not always work but is a useful
tool.
.. _microversion process:
Microversions
=============
The placement API makes use of `microversions`_ to allow the release of new
features on an opt in basis. See :doc:`/index` for an up to date
history of the available microversions.
The rules around when a microversion is needed are modeled after those of the
:nova-doc:`compute API `. When adding a new
microversion there are a few bits of required housekeeping that must be done in
the code:
* Update the ``VERSIONS`` list in ``placement/microversion.py`` to indicate the
new microversion and give a very brief summary of the added feature.
* Update ``placement/rest_api_version_history.rst`` to add a more detailed
section describing the new microversion.
* Add a :reno-doc:`release note <>` with a ``features`` section announcing the
new or changed feature and the microversion.
* If the ``version_handler`` decorator (see below) has been used, increment
``TOTAL_VERSIONED_METHODS`` in ``placement/tests/unit/test_microversion.py``.
This provides a confirmatory check just to make sure you are paying attention
and as a helpful reminder to do the other things in this list.
* Include functional gabbi tests as appropriate (see :doc:`testing`). At the
least, update the ``latest microversion`` test in
``placement/tests/functional/gabbits/microversion.yaml``.
* Update the `API Reference`_ documentation as appropriate. The source is
located under ``api-ref/source/``.
* If a new error code has been added in ``placement/errors.py``, it should
be added to the `API Reference`_.
In the placement API, microversions only use the modern form of the
version header::
OpenStack-API-Version: placement 1.2
If a valid microversion is present in a request it will be placed,
as a ``Version`` object, into the WSGI environment with the
``placement.microversion`` key. Often, accessing this in handler
code directly (to control branching) is the most explicit and
granular way to have different behavior per microversion. A
``Version`` instance can be treated as a tuple of two ints and
compared as such or there is a ``matches`` method.
A ``version_handler`` decorator is also available. It makes it possible to have
multiple different handler methods of the same (fully-qualified by package)
name, each available for a different microversion window. If a request wants a
microversion that is not available, a defined status code is returned (usually
``404`` or ``405``). There is a unit test in place which will fail if there are
version intersections.
Adding a New Handler
====================
Adding a new URL or a new method (e.g, ``PATCH``) to an existing URL
requires adding a new handler function. In either case a new microversion and
release note is required. When adding an entirely new route a request for a
lower microversion should return a ``404``. When adding a new method to an
existing URL a request for a lower microversion should return a ``405``.
In either case, the ``ROUTE_DECLARATIONS`` dictionary in the
``placement.handler`` module should be updated to point to a
function within a module that contains handlers for the type of entity
identified by the URL. Collection and individual entity handlers of the same
type should be in the same module.
As mentioned above, the handler function should be decorated with
``@wsgi_wrapper.PlacementWsgify``, take a single argument ``req`` which is a
WebOb `Request`_ object, and return a WebOb `Response`_.
For ``PUT`` and ``POST`` methods, request bodies are expected to be JSON
based on a content-type of ``application/json``. This may be enforced by using
a decorator: ``@util.require_content('application/json')``. If the body is not
JSON, a ``415`` response status is returned.
Response bodies are usually JSON. A handler can check the ``Accept`` header
provided in a request using another decorator:
``@util.check_accept('application/json')``. If the header does not allow
JSON, a ``406`` response status is returned.
If a handler returns a response body, a ``Last-Modified`` header should be
included with the response. If the entity or entities in the response body
are directly associated with an object (or objects, in the case of a
collection response) that has an ``updated_at`` (or ``created_at``)
field, that field's value can be used as the value of the header (WebOb will
take care of turning the datetime object into a string timestamp). A
``util.pick_last_modified`` is available to help choose the most recent
last-modified when traversing a collection of entities.
If there is no directly associated object (for example, the output is the
composite of several objects) then the ``Last-Modified`` time should be
``timeutils.utcnow(with_timezone=True)`` (the timezone must be set in order
to be a valid HTTP timestamp). For example, the response__ to
``GET /allocation_candidates`` should have a last-modified header of now
because it is composed from queries against many different database entities,
presents a mixture of result types (allocation requests and provider
summaries), and has a view of the system that is only meaningful *now*.
__ https://docs.openstack.org/api-ref/placement/#list-allocation-candidates
If a ``Last-Modified`` header is set, then a ``Cache-Control`` header with a
value of ``no-cache`` must be set as well. This is to avoid user-agents
inadvertently caching the responses.
JSON sent in a request should be validated against a JSON Schema. A
``util.extract_json`` method is available. This takes a request body and a
schema. If multiple schema are used for different microversions of the same
request, the caller is responsible for selecting the right one before calling
``extract_json``.
When a handler needs to read or write the data store it should use methods on
the objects found in the ``placement.objects`` package. Doing so requires a
context which is provided to the handler method via the WSGI environment. It
can be retrieved as follows::
context = req.environ['placement.context']
.. note:: If your change requires new methods or new objects in the
``placement.objects`` package, after you have made sure that you really
do need those new methods or objects (you may not!) make those
changes in a patch that is separate from and prior to the HTTP API
change.
If a handler needs to return an error response, with the advent of `Placement
API Error Handling`_, it is possible to include a code in the JSON error
response. This can be used to distinguish different errors with the same HTTP
response status code (a common case is a generation conflict versus an
inventory in use conflict). Error codes are simple namespaced strings (e.g.,
``placement.inventory.inuse``) for which symbols are maintained in
``placement.errors``. Adding a symbol to a response is done
by using the ``comment`` kwarg to a WebOb exception, like this::
except exception.InventoryInUse as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
comment=errors.INVENTORY_INUSE)
Code that adds newly raised exceptions should include an error code. Find
additional guidelines on use in the docs for ``placement.errors``. When a
new error code is added, also document it in the `API Reference`_.
Testing of handler code is described in :doc:`testing`.
Database Schema Changes
=======================
At some point in every application's life it becomes necessary to change the
structure of its database. Modifying the SQLAlchemy models (in
placement/db/sqlalchemy/models.py) is necessary for the application to
understand the new structure, but that will not change the actual underlying
database. To do that, Placement uses ``alembic`` to run database migrations.
Alembic calls each change a **revision**. To create a migration with alembic,
run the ``alembic revision`` command. Alembic will then generate a new revision
file with a unique file name, and place it in the ``alembic/versions/``
directory:
.. code-block:: console
ed@devenv:~/projects/placement$ alembic -c placement/db/sqlalchemy/alembic.ini revision -m "Add column foo to bar table"
Generating /home/ed/projects/placement/placement/db/sqlalchemy/alembic/versions/dfb006498ad2_add_column_foo_to_bar_table.py ... done
Let us break down that command:
- The **-c** parameter tells alembic where to find its configuration file.
- **revision** is the alembic subcommand for creating a new revision file.
- The **-m** parameter specifies a brief comment explaining the change.
- The generated file from alembic will have a name consisting of a random hash
prefix, followed by an underscore, followed by your **-m** comment, and a
**.py** extension. So be sure to keep your comment as brief as possible
while still being descriptive.
The generated file will look something like this:
.. code-block:: python
"""Add column foo to bar table
Revision ID: dfb006498ad2
Revises: 0378df171af3
Create Date: 2018-10-29 20:02:58.290779
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dfb006498ad2'
down_revision = '0378df171af3'
branch_labels = None
depends_on = None
def upgrade():
pass
The top of the file is the docstring that will show when you review your
revision history. If we did not include the **-m** comment when we ran the
``alembic revision`` command, this would just contain "empty message". If you did
not specify the comment when creating the file, be sure to replace "empty
message" with a brief comment describing the reason for the database change.
You then need to define the changes in the ``upgrade()`` method. The code used in
these methods is basic SQLAlchemy code for creating and modifying tables. You
can examine existing migrations in the project to see examples of what this
code looks like, as well as find more in-depth usage of Alembic in the `Alembic
tutorial`_.
One other option when creating the revision is to add the ``--autogenerate``
parameter to the revision command. This assumes that you have already updated
the SQLAlchemy models, and have a connection to the placement database
configured. When run with this option, the ``upgrade()`` method of the revision
file is filled in for you by alembic as it compares the schema described in
your models.py script and the actual state of the database. You should always
verify the revision script to make sure it does just what you intended, both by
reading the code as well as running the tests, as there are some things that
autogenerate cannot deduce. See `autogenerate limitations`_ for more detailed
information.
Gotchas
=======
This section tries to shed some light on some of the differences between the
placement API and some of the other OpenStack APIs or on situations which may
be surprising or unexpected.
* The placement API is somewhat more strict about ``Content-Type`` and ``Accept``
headers in an effort to follow the HTTP RFCs.
If a user-agent sends some JSON in a ``PUT`` or ``POST`` request without a
``Content-Type`` of ``application/json`` the request will result in an error.
If a ``GET`` request is made without an ``Accept`` header, the response will
default to being ``application/json``.
If a request is made with an explicit ``Accept`` header that does not include
``application/json`` then there will be an error and the error will attempt to
be in the requested format (for example, ``text/plain``).
* If a URL exists, but a request is made using a method that that URL does not
support, the API will respond with a ``405`` error. Sometimes in the nova APIs
this can be a ``404`` (which is wrong, but understandable given the constraints
of the code).
* Because each handler is individually wrapped by the ``PlacementWsgify``
decorator any exception that is a subclass of ``webob.exc.WSGIHTTPException``
that is raised from within the handler, such as ``webob.exc.HTTPBadRequest``,
will be caught by WebOb and turned into a valid `Response`_ containing
headers and body set by WebOb based on the information given when the
exception was raised. It will not be seen as an exception by any of the
middleware in the placement stack.
In general this is a good thing, but it can lead to some confusion if, for
example, you are trying to add some middleware that operates on exceptions.
Other exceptions that are not from `WebOb`_ will raise outside the handlers
where they will either be caught in the ``__call__`` method of the
``PlacementHandler`` app that is responsible for dispatch, or by the
``FaultWrap`` middleware.
.. _WSGI: https://www.python.org/dev/peps/pep-3333/
.. _wsgify: http://docs.webob.org/en/latest/api/dec.html
.. _WebOb: http://docs.webob.org/en/latest/
.. _Request: http://docs.webob.org/en/latest/reference.html#request
.. _Response: http://docs.webob.org/en/latest/#response
.. _microversions: http://specs.openstack.org/openstack/api-wg/guidelines/microversion_specification.html
.. _errors: http://specs.openstack.org/openstack/api-wg/guidelines/errors.html
.. _API Reference: https://docs.openstack.org/api-ref/placement/
.. _Placement API Error Handling: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/placement-api-error-handling.html
.. _`Alembic tutorial`: https://alembic.zzzcomputing.com/en/latest/tutorial.html
.. _`autogenerate limitations`: https://alembic.zzzcomputing.com/en/latest/autogenerate.html#what-does-autogenerate-detect-and-what-does-it-not-detect
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/contributing.rst 0000664 0000000 0000000 00000031317 15132464062 0026554 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
============================
So You Want to Contribute...
============================
For general information on contributing to OpenStack, please check out the
`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
It covers all the basics that are common to all OpenStack projects: the accounts
you need, the basics of interacting with our Gerrit review system, how we
communicate as a community, etc.
Below will cover the more project specific information you need to get started
with placement.
Communication
-------------
As an official OpenStack project, Placement follows the overarching processes
outlined in the `Project Team Guide`_. Contribution is welcomed from any
interested parties and takes many different forms.
To make sure everything gets the attention it deserves and work is not
duplicated there are some guidelines, stated here.
If in doubt, ask someone, either by sending a message to the
`openstack-discuss`_ mailing list with a ``[placement]`` subject tag, or by
visiting the ``#openstack-nova`` IRC channel on ``webchat.oftc.net``.
Submitting and Managing Bugs
----------------------------
Bugs found in placement should be reported in `Launchpad`_ by creating a new
bug in the ``placement`` project.
.. _new_bug:
New Bugs
~~~~~~~~
If you are submitting a `new bug`_, explain the problem, the steps taken that led
to the bad results, and the expected results. Please also add as much of the
following information as possible:
* Relevant lines from the ``placement-api`` log.
* The OpenStack release (e.g., ``Stein``).
* The method used to install or deploy placement.
* The operating system(s) on which the placement is running.
* The version(s) of Python being used.
Tag the bug with relevant tags, such as ``doc`` or ``api``, etcetera.
Learn more about launchpad from `openstack launchpad doc`_.
.. _triage:
Triaging Bugs
~~~~~~~~~~~~~
Triaging newly submitted bugs to confirm they really are bugs, gather missing
information, and to suggest possible solutions is one of the most important
contributions that can be made to any open source project.
If a new bug doesn't have tags, add the relevant tag as per the area of
affected code.
Leave comments on the bug if you have questions or ideas. If you are
relatively certain about a solution, add the steps of that solution as tasks
on the bug.
While triaging, only if you are sure, update the status of the bug from new
to others.
If submitting a change related to a bug, the `gerrit`_ system will
automatically link to launchpad bug if you include ``bug_id:`` identifiers in
your commit message, like this::
Related-Bug: 2005189
Partial-Bug: 2005190
Closes-Bug: 2005190
Reviewing Code
--------------
Like other OpenStack projects, Placement uses `gerrit`_ to facilitate peer code
review. It is hoped and expected that anyone who would like to commit code to
the Placement project will also spend time reviewing code for the sake of the
common good. The more people reviewing, the more code that will eventually
merge.
See `How to Review Changes the OpenStack Way`_ for an overview of the review
and voting process.
There is a small group of people within the Placement team called `core
reviewers`_. These are people who are empowered to signal (via the ``+2`` vote)
that code is of a suitable standard to be merged and is aligned with the
current goals of the project. Core reviewers are regularly selected from all
active reviewers based on the quantity and quality of their reviews and
demonstrated understanding of the Placement code and goals of the project.
The point of review is to evolve potentially useful code to merged working code
that is aligned with the standards of style, testing, and correctness that we
share as group. It is not for creating perfect code. Review should always be
`constructive`_, encouraging, and friendly. People who contribute code are
doing the project a favor, make it feel that way.
Some guidelines that reviewers and patch submitters should be aware of:
* It is very important that a new patch set gets some form of review as soon as
possible, even if only to say "we've seen this". Latency in the review
process has been identified as hugely discouraging for new and experienced
contributors alike.
* Follow up changes, to fix minor problems identified during review, are
encouraged. We want to keep things moving.
* As a reviewer, remember that not all patch submitters will know these
guidelines. If it seems they don't, point them here and be patient in the
meantime.
* Gerrit can be good for code review, but is often not a great environment for
having a discussion that is struggling to resolve to a decision. Move
discussion to the mailing list sooner rather than later. Add a link to the
thread in the `list archive`_ to the review.
* If the CI system is throwing up random failures in test runs, you should
endeavor whenever possible to investigate, not simply ``recheck``. A flakey
gate is an indication that OpenStack is not robust and at the root of all
this, making OpenStack work well is what we are doing.
See here for `How to Recheck`_
Special Considerations For Core Reviewers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Core reviewers have special powers. With great power comes great responsibility
and thus being held to a standard. As a core reviewer, your job is to enable
other people to contribute good code. Under ideal conditions it is more
important to be reviewing other people's code and bugs and fixing bugs than it
is to be writing your own features. Frequently conditions will not be ideal,
but strive to enable others.
When there are open questions that need to be resolved, try to prefer the
`openstack-discuss`_ list over IRC so that anyone can be involved according
to their own schedules and input from unexpected sources can be available.
Writing Code
------------
This document cannot enumerate all the many ways to write good Python code.
Instead it lists some guidelines that, if followed, will help make sure your
code is reviewed promptly and merges quickly. As with everything else in this
document, these guidelines will evolve over time and may be violated for
special circumstances. If you have questions, ask.
See :doc:`/contributor/index` for an overview of Placement and how the various
pieces fit together.
* Divide your change into a series of commits each of which encapsulates a
single unit of functionality but still results in a working service. Smaller
changes are easier to review.
* If your change is to the HTTP API, familiarize yourself with
:ref:`microversion process`.
* If there is a series of changes leading to an HTTP API change, exposing that
API change should be the last patch in the series. That patch must update the
API_ reference and include a `release note`_.
* Changes must include tests. There is a separate document on
:doc:`/contributor/testing`.
* Run ``tox`` before submitting your code to gerrit_. This will run unit and
functional tests in both Python 2 and Python 3, and pep8 style checks.
Placement tests, including functional, are fast, so this should not be too
much of a hardship. By running the tests locally you avoid wasting scarce
resources in the CI system.
* Keep the tests fast. Avoid sleeps, network connections, and external
processes in the tests.
* Keep Placement fast. There is a ``placement-perfload`` job that runs with
every patch. Within that is a log file, ``/logs/placement-perf.txt[.gz]``
that gives rough timing information for a common operation. We want those
numbers to stay small.
* We follow the code formatting guidelines of `PEP 8`_. Check your code with
``tox -epep8`` (for all files) or ``tox -efast8`` (for just the files you
changed). You will not always agree with the advice provided. Follow it.
* Where possible avoid using the visual indent style. Using it can make future
changes unnecessarily difficult. This guideline is not enforced by pep8 and
has been used throughout the code in the past. There's no need to fix old
use. Instead of this
.. code-block:: python
return_value = self.some_method(arg1, arg2,
arg3, arg4)
prefer this
.. code-block:: python
return_value = self.some_method(
arg1, arg2, arg3, arg4)
New Features
------------
New functionality in Placement is developed as needed to meet new use cases or
improve the handling of existing use cases. As a service used by other services
in OpenStack, use cases often originate in those other services. Considerable
collaboration with other projects is often required to determine if any changes
are needed in the Placement API_ or elsewhere in the project. That interaction
should happen in the usual ways: At Project Team Gatherings, on the
openstack-discuss_ list, and in IRC.
Create a new bug as described in :ref:`new_bug` above.
If a spec is required there are some guidelines for creating one:
* A file should be created in the `placement code`_ in
``doc/source/specs/<cycle>/approved`` with a filename beginning with the
identifier of the bug. For example::
docs/source/specs/train/approved/200056-infinite-resource-classes.rst
More details on how to write a spec are included in a ``template.rst`` file
found in the ``doc/source/specs`` directory. This may be copied to use as the
starting point for a new spec.
* Under normal circumstances specs should be proposed near the beginning of a
release cycle so there is sufficient time to review the spec and its
implementation as well as to make any necessary decisions about limiting the
number of specs being worked in the same cycle. Unless otherwise announced at
the beginning of a cycle, specs should merge before milestone-2 to be
considered relevant for that cycle. Exceptions will be reviewed on a case by
case basis. See the `stein schedule`_ for an example schedule.
* Work items that are described in a spec should be reflected as tasks
created on the originating launchpad bug. Update the bug with additional
tasks as they are discovered. Most new tasks will not require updating the
spec.
* If, when developing a feature, the implementation significantly diverges from
the spec, the spec should be updated to reflect the new reality. This should
not be considered exceptional: It is normal for there to be learning during
the development process which impacts the solution.
* Though specs are presented with the Placement documentation and can usefully
augment end-user documentation, they are not a substitute. Development of a
new feature is not complete without documentation.
When a spec was approved in a previous release cycle, but was not finished, it
should be re-proposed (via gerrit) to the current cycle. Include
``Previously-Approved: <cycle>`` in the commit message to highlight that fact.
If there have been no changes, core reviewers should feel free to fast-approve
(only one ``+2`` required) the change.
Project Team Lead Duties
------------------------
PTL duties are enumerated in the `PTL guide`_.
.. _Project Team Guide: https://docs.openstack.org/project-team-guide/
.. _openstack-discuss: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
.. _list archive: http://lists.openstack.org/pipermail/openstack-discuss/
.. _Launchpad: https://bugs.launchpad.net/placement
.. _new bug: https://bugs.launchpad.net/placement/+filebug
.. _gerrit: http://review.opendev.org/
.. _How to Review Changes the OpenStack Way: https://docs.openstack.org/project-team-guide/review-the-openstack-way.html
.. _core reviewers: https://review.opendev.org/#/admin/groups/1936,members
.. _constructive: https://governance.openstack.org/tc/reference/principles.html#we-value-constructive-peer-review
.. _API: https://docs.openstack.org/api-ref/placement/
.. _placement code: https://opendev.org/openstack/placement
.. _stein schedule: https://releases.openstack.org/stein/schedule.html
.. _release note: https://docs.openstack.org/reno/latest/
.. _PEP 8: https://www.python.org/dev/peps/pep-0008/
.. _PTL guide: https://docs.openstack.org/project-team-guide/ptl.html
.. _openstack launchpad doc: https://docs.openstack.org/contributors/common/task-tracking.html#launchpad
.. _How to Recheck: https://docs.openstack.org/project-team-guide/testing.html#how-to-handle-test-failures
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/goals.rst 0000664 0000000 0000000 00000004326 15132464062 0025152 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
=====
Goals
=====
Like many OpenStack projects, placement uses blueprints and specifications to
plan and design upcoming work. Sometimes, however, certain types of work fit
more in the category of wishlist, or when-we-get-around-to-it. These types of
work are often not driven by user or operator feature requests, but are instead
related to architectural, maintenance, and technical debt management goals that
will make the lives of contributors to the project easier over time. In those
cases a specification is too formal and detailed but it is still worthwhile to
remember the idea and put it somewhere. That's what this document is for: a
place to find and put goals for placement that are related to making
contribution more pleasant and keep the project and product healthy, yet are
too general to be considered feature requests.
This document can also operate as one of several sources of guidance on how not
to stray too far from the long term vision of placement.
Don't Use Global Config
-----------------------
Placement uses `oslo.config`_ to manage configuration, passing a reference to
an ``oslo_config.cfg.ConfigOpts`` as required. Before things `were changed`_ a
global was used instead. Placement inherited this behavior from nova, where
using a global ``CONF`` is the normal way to interact with the configuration
options. Continuing this pattern in placement made it difficult for nova to use
externalized placement in its functional tests, so the situation was changed.
We'd like to keep it this way as it makes the code easier to maintain.
.. _oslo.config: https://docs.openstack.org/oslo.config
.. _were changed: https://review.opendev.org/#/c/619121/
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/index.rst 0000664 0000000 0000000 00000003151 15132464062 0025147 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
===========================
Placement Developer Notes
===========================
The Nova project introduced the placement service as part of the Newton
release, and it was extracted to its own repository in the Stein release. The
service provides an HTTP API to manage inventories of different classes of
resources, such as disk or virtual cpus, made available by entities called
resource providers. Information provided through the placement API is intended
to enable more effective accounting of resources in an OpenStack deployment and
better scheduling of various entities in the cloud.
The document serves to explain the architecture of the system and to provide
some guidance on how to maintain and extend the code. For more detail on why
the system was created and how it does its job see :doc:`/index`. For some
insight into the longer term goals of the system see :doc:`goals` and
:doc:`vision-reflection`.
.. toctree::
:maxdepth: 2
contributing
architecture
api-ref-guideline
goals
quick-dev
testing
vision-reflection
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/quick-dev.rst 0000664 0000000 0000000 00000015617 15132464062 0025742 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
===========================
Quick Placement Development
===========================
.. note:: This is one of many ways to achieve a simple *live* development
environment for the placement service. This isn't meant to be the
best way, or the only way. Its purpose is more to demonstrate the
steps involved, so that people can learn from those steps and choose
to assemble them in whatever ways work best for them.
This content was originally written in a `blog post
<https://anticdent.org/quick-placement-development.html>`_, which
perhaps explains its folksy tone.
Here are some instructions on how to spin up the placement wsgi script with
uwsgi and a stubbed out ``placement.conf``, in case you want to see what
happens. The idea here is that you want to experiment with the current
placement code, using a live database, but you're not concerned with other
services, don't want to deal with devstack, but need a level of interaction
with the code and process that something like `placedock
`_ can't provide.
*As ever, even all of the above has lots of assumptions about experience and
context. This document assumes you are someone who either is an OpenStack (and
probably placement) developer, or would like to be one.*
To make this go you need a unix-like OS, with a python3 dev environment, and
git and mysql (or postgresql) installed. We'll be doing this work from within a
virtualenv, built from the ``tox.ini`` in the placement code.
Get The Code
============
The placement code lives at
https://opendev.org/openstack/placement . We want to clone that::
git clone https://opendev.org/openstack/placement
cd placement
Setup The Database
==================
We need to 1) create the database, 2) create a virtualenv to have the command,
3) use it to create the tables.
The database can have whatever name you like. Whatever you choose, use it
throughout this process. We choose ``placement``. You may need a user and
password to talk to your database, setting that up is out of scope for this
document::
mysql -uroot -psecret -e "DROP DATABASE IF EXISTS placement;"
mysql -uroot -psecret -e "CREATE DATABASE placement CHARACTER SET utf8;"
You may also need to set permissions::
mysql -uroot -psecret \
-e "GRANT ALL PRIVILEGES ON placement.* TO 'root'@'%' identified by 'secret';"
Create a bare minimum placement.conf in the ``/etc/placement``
directory (which you may need to create)::
[placement_database]
connection = mysql+pymysql://root:secret@127.0.0.1/placement?charset=utf8
.. note:: You may choose the location of the configuration file on the command
line when using the ``placement-manage`` command.
Make the ``placement-manage`` command available by updating a virtualenv::
tox -epy36 --notest
Run the command to create the tables::
.tox/py36/bin/placement-manage db sync
You can confirm the tables are there with ``mysqlshow placement``
Run The Service
===============
Now we want to run the service. We need to update ``placement.conf`` so it will
produce debugging output and use the ``noauth`` strategy for authentication (so
we don't also have to run Keystone). Make ``placement.conf`` look like this
(adjusting for your database settings)::
[DEFAULT]
debug = True
[placement_database]
connection = mysql+pymysql://root:secret@127.0.0.1/placement?charset=utf8
[api]
auth_strategy = noauth2
We need to install the uwsgi package into the virtualenv::
.tox/py36/bin/pip install uwsgi
And then use uwsgi to run the service. Start it with::
.tox/py36/bin/uwsgi --http :8000 --wsgi-file .tox/py36/bin/placement-api --processes 2 --threads 10
.. note:: Adjust ``processes`` and ``threads`` as required. If you do not
provide these arguments the server will be a single process and
thus perform poorly.
If that worked you'll see lots of debug output and ``spawned uWSGI worker``.
Test that things are working from another terminal with curl::
curl -v http://localhost:8000/
Get a list of resource providers with (the ``x-auth-token`` header is
required, ``openstack-api-version`` is optional but makes sure we are getting
the latest functionality)::
curl -H 'x-auth-token: admin' \
-H 'openstack-api-version: placement latest' \
http://localhost:8000/resource_providers
The result ought to look something like this::
{"resource_providers": []}
If it doesn't then something went wrong with the above and there should be more
information in the terminal where ``uwsgi`` is running.
From here you can experiment with creating resource providers and related
placement features. If you change the placement code, ``ctrl-c`` to kill the
uwsgi process and start it up again. For testing, you might enjoy
`placecat `_.
Here's all of the above as single script. As stated above this is for
illustrative purposes. You should make your own::
#!/bin/bash
set -xe
# Change these as required
CONF_DIR=/etc/placement
DB_DRIVER=mysql+pymysql # we assume mysql throughout, feel free to change
DB_NAME=placement
DB_USER=root
DB_PASS=secret
REPO=https://opendev.org/openstack/placement
# Create a directory for configuration to live.
[[ -d $CONF_DIR ]] || (sudo mkdir $CONF_DIR && sudo chown $USER $CONF_DIR)
# Establish database. Some of this may need sudo powers. Don't be shy
# about changing the script.
mysql -u$DB_USER -p$DB_PASS -e "DROP DATABASE IF EXISTS $DB_NAME;"
mysql -u$DB_USER -p$DB_PASS -e "CREATE DATABASE $DB_NAME CHARACTER SET utf8;"
mysql -u$DB_USER -p$DB_PASS -e "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'%' IDENTIFIED BY '$DB_PASS';"
# clone the right code
git clone $REPO
cd placement
# establish virtenv
tox -epy36 --notest
# write placement.conf
cat <<EOF > $CONF_DIR/placement.conf
[DEFAULT]
debug = True
[placement_database]
connection = $DB_DRIVER://${DB_USER}:${DB_PASS}@127.0.0.1/${DB_NAME}?charset=utf8
[api]
auth_strategy = noauth2
EOF
# Create database tables
.tox/py36/bin/placement-manage db sync
# install uwsgi
.tox/py36/bin/pip install uwsgi
# run uwsgi
.tox/py36/bin/uwsgi --http :8000 --wsgi-file .tox/py36/bin/placement-api --processes 2 --threads 10
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/testing.rst 0000664 0000000 0000000 00000020000 15132464062 0025505 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
===================
Testing Placement
===================
Most of the handler code in the placement API is tested using `gabbi`_. Some
utility code is tested with unit tests found in ``placement/tests/unit``. The
back-end objects are tested with a combination of unit and functional tests
found in ``placement/tests/unit/objects`` and
``placement/tests/functional/db``.
When writing tests for handler code (that is, the code found in
``placement/handlers``) a good rule of thumb is that if you feel like there
needs to be a unit test for some of the code in the handler, that is a good
sign that the piece of code should be extracted to a separate method. That
method should be independent of the handler method itself (the one decorated by
the ``wsgify`` method) and testable as a unit, without mocks if possible. If
the extracted method is useful for multiple resources consider putting it in
the ``util`` package.
As a general guide, handler code should be relatively short and where there are
conditionals and branching, they should be reachable via the gabbi functional
tests. This is merely a design goal, not a strict constraint.
Using Gabbi
-----------
Gabbi was developed in the `telemetry`_ project to provide a declarative way to
test HTTP APIs that preserves visibility of both the request and response of
the HTTP interaction. Tests are written in YAML files where each file is an
ordered suite of tests. Fixtures (such as a database) are set up and torn down
at the beginning and end of each file, not each test. JSON response bodies can
be evaluated with `JSONPath`_. The placement WSGI application is run via
`wsgi-intercept`_, meaning that real HTTP requests are being made over a file
handle that appears to Python to be a socket.
In the placement API the YAML files (aka "gabbits") can be found in
``placement/tests/functional/gabbits``. Fixture definitions are in
``placement/tests/functional/fixtures/gabbits.py``. Tests are frequently
grouped by handler name (e.g., ``resource-provider.yaml`` and
``inventory.yaml``). This is not a requirement and as we increase the number of
tests it makes sense to have more YAML files with fewer tests, divided up by
the arc of API interaction that they test.
The gabbi tests are integrated into the functional tox target, loaded via
``placement/tests/functional/test_api.py``. If you
want to run just the gabbi tests one way to do so is::
tox -efunctional test_api
If you want to run just one yaml file (in this example ``inventory.yaml``)::
tox -efunctional api.inventory
It is also possible to run just one test from within one file. When you do this
every test prior to the one you asked for will also be run. This is because
the YAML represents a sequence of dependent requests. Select the test by using
the name in the yaml file, replacing space with ``_``::
tox -efunctional api.inventory_post_new_ipv4_address_inventory
.. note:: ``tox.ini`` in the placement repository is configured by a
``group_regex`` so that each gabbi YAML is considered a group. Thus,
all tests in the file will be run in the same process when running
stestr concurrently (the default).
Writing More Gabbi Tests
------------------------
The docs for `gabbi`_ try to be complete and explain the `syntax`_ in some
depth. Where something is missing or confusing, please log a `bug`_.
While it is possible to test all aspects of a response (all the response
headers, the status code, every attribute in a JSON structure) in one single
test, doing so will likely make the test harder to read and will certainly make
debugging more challenging. If there are multiple things that need to be
asserted, making multiple requests is reasonable. Since database set up is only
happening once per file (instead of once per test) and since there is no TCP
overhead, the tests run quickly.
While `fixtures`_ can be used to establish entities that are required for
tests, creating those entities via the HTTP API results in tests which are more
descriptive. For example the ``inventory.yaml`` file creates the resource
provider to which it will then add inventory. This makes it easy to explore a
sequence of interactions and a variety of responses with the tests:
* create a resource provider
* confirm it has empty inventory
* add inventory to the resource provider (in a few different ways)
* confirm the resource provider now has inventory
* modify the inventory
* delete the inventory
* confirm the resource provider now has empty inventory
Nothing special is required to add a new set of tests: create a YAML file with
a unique name in the same directory as the others. The other files can provide
examples. Gabbi can provide a useful way of doing test driven development of a
new handler: create a YAML file that describes the desired URLs and behavior
and write the code to make it pass.
It's also possible to use gabbi against a running placement service, for
example in devstack. See `gabbi-run`_ to get started. If you don't want to
go to the trouble of using devstack, but do want a live server see
:doc:`quick-dev`.
Profiling
---------
If you wish to profile requests to the placement service, to get an idea of
which methods are consuming the most CPU or are being used repeatedly, it is
possible to enable a ProfilerMiddleware_ to output per-request python profiling
dumps. The environment (:doc:`quick-dev` is a good place to start) in which
the service is running will need to have Werkzeug_ added.
* If the service is already running, stop it.
* Install Werkzeug.
* Set an environment variable, ``OS_WSGI_PROFILER``, to a directory where
profile results will be written.
* Make sure the directory exists.
* Start the service, ensuring the environment variable is passed to it.
* Make an HTTP request that exercises the code you wish to profile.
The profiling results will be in the directory named by ``OS_WSGI_PROFILER``.
There are many ways to analyze the files. See `Profiling WSGI Apps`_ for an
example.
Profiling with OSProfiler
-------------------------
To use `OSProfiler`_ with placement:
* Add a [profiler] section to the placement.conf:
.. code-block:: ini
[profiler]
connection_string = mysql+pymysql://root:admin@127.0.0.1/osprofiler?charset=utf8
hmac_keys = my-secret-key
enabled = True
* Include the hmac_keys in your API request:
.. code-block:: console
$ openstack resource provider list --os-profile my-secret-key
The openstack client will return the trace id:
.. code-block:: console
Trace ID: 67428cdd-bfaa-496f-b430-507165729246
* Extract the trace in html format:
.. code-block:: console
$ osprofiler trace show --html 67428cdd-bfaa-496f-b430-507165729246 \
--connection-string mysql+pymysql://root:admin@127.0.0.1/osprofiler?charset=utf8
.. _bug: https://github.com/cdent/gabbi/issues
.. _fixtures: http://gabbi.readthedocs.io/en/latest/fixtures.html
.. _gabbi: https://gabbi.readthedocs.io/
.. _gabbi-run: http://gabbi.readthedocs.io/en/latest/runner.html
.. _JSONPath: http://goessner.net/articles/JsonPath/
.. _ProfilerMiddleware: https://werkzeug.palletsprojects.com/en/master/middleware/profiler/
.. _Profiling WSGI Apps: https://anticdent.org/profiling-wsgi-apps.html
.. _syntax: https://gabbi.readthedocs.io/en/latest/format.html
.. _telemetry: http://specs.openstack.org/openstack/telemetry-specs/specs/kilo/declarative-http-tests.html
.. _Werkzeug: https://palletsprojects.com/p/werkzeug/
.. _wsgi-intercept: http://wsgi-intercept.readthedocs.io/
.. _OSProfiler: https://docs.openstack.org/osprofiler/latest/
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/contributor/vision-reflection.rst 0000664 0000000 0000000 00000005667 15132464062 0027515 0 ustar 00root root 0000000 0000000 =================
Vision Reflection
=================
In late-2018, the OpenStack Technical Committee composed a
`technical vision <https://governance.openstack.org/tc/reference/technical-vision.html>`_
of what OpenStack clouds should look like. This document compares the state of
placement relative to that vision to provide some guidance on broad stroke ways
in which placement may need to change to match the vision.
Since placement is primarily a back-end and admin-only system (at least for
now), many aspects of the vision document do not apply, but it is still a
useful exercise.
Note that there is also a placement :doc:`goals` document.
The vision document is divided into three sections, which this document
mirrors. This should be a living document which evolves as placement itself
evolves.
The Pillars of Cloud
====================
The sole interface to the placement service is an HTTP API, meaning that in
theory, anything can talk to it, enabling the self-service and application
control that define a cloud. However, at the moment the data managed by
placement is considered for administrators only. This policy could be changed,
but doing so would be a dramatic adjustment in the scope of who placement is
for and what it does. Since placement has not yet fully satisfied its original
vision to clarify and ease cloud resource allocation such a change should be
considered secondary to completing the original goals.
OpenStack-specific Considerations
=================================
Placement uses microversions to help manage interoperability and bi-directional
compatibility. Because placement has used microversions from the very start a
great deal of the valuable functionality is only available in an opt-in
fashion. In fact, it would be accurate to say that a placement service at the
default microversion is incapable of being a placement service. We may wish to
evaluate (and publish) if there is a minimum microversion at which placement is
useful. To some extent this is already done with the way nova requires specific
placement microversions, and for placement to be upgraded in advance of nova.
As yet, placement provides no dedicated mechanism for partitioning its resource
providers amongst regions. Aggregates can be used for this purpose but this is
potentially cumbersome in the face of multi-region use cases where a single
placement service is used to manage resources in several clouds. This is an
area that is already under consideration, and would bring placement closer to
matching the "partitioning" aspect of the vision document.
Design Goals
============
Placement already maps well to several of the design goals in the vision
document, adhering to fairly standard methods for scalability, reliability,
customization, and flexible utilization models. It does this by being a simple
web app over a database and not much more. We should strive to keep this.
Details of how we plan to do so should be maintained in the :doc:`goals`
document.
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/index.rst 0000664 0000000 0000000 00000004723 15132464062 0022603 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
===========
Placement
===========
The placement API service was introduced in the 14.0.0 Newton release within
the nova repository and extracted to the `placement repository`_ in the 19.0.0
Stein release. This is a REST API stack and data model used to track resource
provider inventories and usages, along with different classes of resources.
For example, a resource provider can be a compute node, a shared storage pool,
or an IP allocation pool. The placement service tracks the inventory and usage
of each provider. For example, an instance created on a compute node may be a
consumer of resources such as RAM and CPU from a compute node resource
provider, disk from an external shared storage pool resource provider and IP
addresses from an external IP pool resource provider.
The types of resources consumed are tracked as **classes**. The service
provides a set of standard resource classes (for example ``DISK_GB``,
``MEMORY_MB``, and ``VCPU``) and provides the ability to define custom
resource classes as needed.
Each resource provider may also have a set of traits which describe qualitative
aspects of the resource provider. Traits describe an aspect of a resource
provider that cannot itself be consumed but a workload may wish to specify. For
example, available disk may be solid state drives (SSD).
.. _placement repository: https://opendev.org/openstack/placement
Usages
======
.. toctree::
:maxdepth: 2
user/index
Command Line Interface
======================
.. toctree::
:maxdepth: 2
cli/index
Configuration
=============
.. toctree::
:maxdepth: 2
configuration/index
Contribution
============
.. toctree::
:maxdepth: 2
contributor/index
Specifications
==============
.. toctree::
:maxdepth: 2
specs/index
Deployment
==========
.. toctree::
:maxdepth: 2
install/index
Administrator Guide
===================
.. toctree::
:maxdepth: 2
admin/index
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/ 0000775 0000000 0000000 00000000000 15132464062 0022402 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/from-pypi.rst 0000664 0000000 0000000 00000021243 15132464062 0025060 0 ustar 00root root 0000000 0000000 Install and configure Placement from PyPI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the placement service using
packages from PyPI_. Placement works with Python version 2.7, but version 3.6
or higher is recommended.
This document assumes you have a working MySQL server and a working Python
environment, including the :ref:`about-pip` package installer. Depending on
your environment, you may wish to install placement in a virtualenv_.
This document describes how to run placement with uwsgi_ as its web server.
This is but one of many different ways to host the service. Placement is a
well-behaved WSGI_ application so should be straightforward to host with any
WSGI server.
If using placement in an OpenStack environment, you will need to ensure it is
up and running before starting services that use it but after services it uses.
That means after Keystone_, but before anything else.
Prerequisites
-------------
Before installing the service, you will need to create the database, service
credentials, and API endpoints, as described in the following sections.
.. _about-pip:
pip
^^^
Install `pip <https://pip.pypa.io/en/stable/>`_ from PyPI_.
.. note:: Examples throughout this reference material use the ``pip`` command.
This may need to be pathed or spelled differently (e.g. ``pip3``)
depending on your installation and Python version.
python-openstackclient
^^^^^^^^^^^^^^^^^^^^^^
If not already installed, install the ``openstack`` command line tool:
.. code-block:: console
# pip install python-openstackclient
.. _create-database-pypi:
Create Database
^^^^^^^^^^^^^^^
Placement is primarily tested with MySQL/MariaDB so that is what is described
here. It also works well with PostgreSQL and likely with many other databases
supported by sqlalchemy_.
To create the database, complete these steps:
.. TODO(cdent): Extract this to a shared document for all the install docs.
#. Use the database access client to connect to the database server as the
``root`` user or by using ``sudo`` as appropriate:
.. code-block:: console
# mysql
#. Create the ``placement`` database:
.. code-block:: console
MariaDB [(none)]> CREATE DATABASE placement;
#. Grant proper access to the database:
.. code-block:: console
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
IDENTIFIED BY 'PLACEMENT_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
IDENTIFIED BY 'PLACEMENT_DBPASS';
Replace ``PLACEMENT_DBPASS`` with a suitable password.
#. Exit the database access client.
.. _configure-endpoints-pypi:
Configure User and Endpoints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note:: If you are not using Keystone, you can skip the steps below but will
need to configure the :oslo.config:option:`api.auth_strategy` setting
with a value of ``noauth2``. See also :doc:`/contributor/quick-dev`.
.. note:: You will need to authenticate to Keystone as an ``admin`` before
making these calls. There are many different ways to do this,
depending on how your system was set up. If you do not have an
``admin-openrc`` file, you will have something similar.
.. important:: These documents use an endpoint URL of
``http://controller:8778/`` as an example only. You should
configure placement to use whatever hostname and port works best
for your environment. Using SSL on the default port, with either
a domain or path specific to placement, is recommended. For
example: ``https://mygreatcloud.com/placement`` or
``https://placement.mygreatcloud.com/``.
.. include:: shared/endpoints.rst
.. _configure-conf-pypi:
Install and configure components
--------------------------------
The default location of the placement configuration file is
``/etc/placement/placement.conf``. A different directory may be chosen by
setting ``OS_PLACEMENT_CONFIG_DIR`` in the environment. It is also possible to
run the service with a partial or no configuration file and set some options
in `the environment`_. See :doc:`/configuration/index` for additional
configuration settings not mentioned here.
.. note:: In the steps below, ``controller`` is used as a stand in for the
hostname of the hosts where keystone, mysql, and placement are
running. These may be distinct. The keystone host (used for
``auth_url`` and ``www_authenticate_uri``) should be the unversioned
public endpoint for the Identity service.
.. TODO(cdent): Some of these database steps could be extracted to a shared
document used by all the install docs.
#. Install placement and required database libraries:
.. code-block:: console
# pip install openstack-placement pymysql
#. Create the ``/etc/placement/placement.conf`` file and complete the following
actions:
* Create a ``[placement_database]`` section and configure database access:
.. path /etc/placement/placement.conf
.. code-block:: ini
[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
Replace ``PLACEMENT_DBPASS`` with the password you chose for the placement
database.
* Create ``[api]`` and ``[keystone_authtoken]`` sections, configure Identity
service access:
.. path /etc/placement/placement.conf
.. code-block:: ini
[api]
auth_strategy = keystone # use noauth2 if not using keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS
Replace ``PLACEMENT_PASS`` with the password you chose for the
``placement`` user in the Identity service.
.. note::
The values of ``username``, ``password``, ``project_domain_name`` and
``user_domain_name`` need to be in sync with your keystone config.
* You may wish to set the :oslo.config:option:`debug` option to ``True`` to
produce more verbose log output.
#. Populate the ``placement`` database:
.. code-block:: console
$ placement-manage db sync
.. note:: An alternative is to use the
:oslo.config:option:`placement_database.sync_on_startup` option.
Finalize installation
---------------------
Now that placement itself has been installed we need to launch the service in a
web server. What follows provides a very basic web server that, while
relatively performant, is not set up to be easy to manage. Since there are many
web servers and many ways to manage them, such things are outside the scope of
this document.
Install and run the web server:
#. Install the ``uwsgi`` package (these instructions are against version
2.0.18):
.. code-block:: console
# pip install uwsgi
#. Run the server with the placement WSGI application in a terminal window:
.. warning:: Make sure you are using the correct ``uwsgi`` binary. It may
be in multiple places in your path. The wrong version will
fail and complain about bad arguments.
.. code-block:: console
# uwsgi -M --http :8778 --wsgi-file /usr/local/bin/placement-api \
--processes 2 --threads 10
#. In another terminal confirm the server is running using ``curl``. The URL
should match the public endpoint set in :ref:`configure-endpoints-pypi`.
.. code-block:: console
$ curl http://controller:8778/
The output will look something like this:
.. code-block:: json
{
"versions" : [
{
"id" : "v1.0",
"max_version" : "1.31",
"links" : [
{
"href" : "",
"rel" : "self"
}
],
"min_version" : "1.0",
"status" : "CURRENT"
}
]
}
Further interactions with the system can be made with osc-placement_.
.. _PyPI: https://pypi.org
.. _virtualenv: https://pypi.org/project/virtualenv/
.. _uwsgi: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html
.. _WSGI: https://www.python.org/dev/peps/pep-3333/
.. _Keystone: https://docs.openstack.org/keystone/latest/
.. _sqlalchemy: https://www.sqlalchemy.org
.. _the environment: https://docs.openstack.org/oslo.config/latest/reference/drivers.html#module-oslo_config.sources._environment
.. _osc-placement: https://docs.openstack.org/osc-placement/latest/
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/index.rst 0000664 0000000 0000000 00000014103 15132464062 0024242 0 ustar 00root root 0000000 0000000 ============
Installation
============
.. note:: Before the Stein release the placement code was in Nova alongside
the compute REST API code (nova-api). Make sure that the release
version of this document matches the release version you want to
deploy.
Steps Overview
--------------
This subsection gives an overview of the process without going into detail
on the methods used.
**1. Deploy the API service**
Placement provides a ``placement-api`` WSGI script for running the service with
Apache, nginx or other WSGI-capable web servers. Depending on what packaging
solution is used to deploy OpenStack, the WSGI script may be in ``/usr/bin``
or ``/usr/local/bin``.
``placement-api``, as a standard WSGI script, provides a module level
``application`` attribute that most WSGI servers expect to find. This means it
is possible to run it with lots of different servers, providing flexibility in
the face of different deployment scenarios. Common scenarios include:
* apache2_ with mod_wsgi_
* apache2 with mod_proxy_uwsgi_
* nginx_ with uwsgi_
* nginx with gunicorn_
In all of these scenarios the host, port and mounting path (or prefix) of the
application is controlled in the web server's configuration, not in the
configuration (``placement.conf``) of the placement application.
When placement was `first added to DevStack`_ it used the ``mod_wsgi`` style.
Later it `was updated`_ to use mod_proxy_uwsgi_. Looking at those changes can
be useful for understanding the relevant options.
DevStack is configured to host placement at ``/placement`` on either the
default port for http or for https (``80`` or ``443``) depending on whether TLS
is being used. Using a default port is desirable.
By default, the placement application will get its configuration for settings
such as the database connection URL from ``/etc/placement/placement.conf``.
The directory the configuration file will be found in can be changed by setting
``OS_PLACEMENT_CONFIG_DIR`` in the environment of the process that starts the
application. With recent releases of ``oslo.config``, configuration options may
also be set in the environment_.
.. note:: When using uwsgi with a front end (e.g., apache2 or nginx) something
needs to ensure that the uwsgi process is running. In DevStack this is done
with systemd_. This is one of many different ways to manage uwsgi.
This document refrains from declaring a set of installation instructions for
the placement service. This is because a major point of having a WSGI
application is to make the deployment as flexible as possible. Because the
placement API service is itself stateless (all state is in the database), it is
possible to deploy as many servers as desired behind a load balancing solution
for robust and simple scaling. If you familiarize yourself with installing
generic WSGI applications (using the links in the common scenarios list,
above), those techniques will be applicable here.
.. _apache2: http://httpd.apache.org/
.. _mod_wsgi: https://modwsgi.readthedocs.io/
.. _mod_proxy_uwsgi: http://uwsgi-docs.readthedocs.io/en/latest/Apache.html
.. _nginx: http://nginx.org/
.. _uwsgi: http://uwsgi-docs.readthedocs.io/en/latest/Nginx.html
.. _gunicorn: http://gunicorn.org/
.. _first added to DevStack: https://review.opendev.org/#/c/342362/
.. _was updated: https://review.opendev.org/#/c/456717/
.. _systemd: https://review.opendev.org/#/c/448323/
.. _environment: https://docs.openstack.org/oslo.config/latest/reference/drivers.html#environment
**2. Synchronize the database**
The placement service uses its own database, defined in the
:oslo.config:group:`placement_database` section of configuration. The
:oslo.config:option:`placement_database.connection` option **must** be set or
the service will not start. The command line tool :doc:`/cli/placement-manage`
can be used to migrate the database tables to their correct form, including
creating them. The database described by the ``connection`` option must
already exist and have appropriate access controls defined.
Another option for synchronization is to set
:oslo.config:option:`placement_database.sync_on_startup` to ``True`` in
configuration. This will perform any missing database migrations as the
placement web service starts. Whether you choose to sync automatically or use
the command line tool depends on the constraints of your environment and
deployment tooling.
**3. Create accounts and update the service catalog**
Create a **placement** service user with an **admin** role in Keystone.
The placement API is a separate service and thus should be registered under
a **placement** service type in the service catalog. Clients of placement, such
as the resource tracker in the nova-compute node, will use the service catalog
to find the placement endpoint.
See :ref:`configure-endpoints-pypi` for examples of creating the service user
and catalog entries.
Devstack sets up the placement service on the default HTTP port (80) with a
``/placement`` prefix instead of using an independent port.
Installation Packages
---------------------
This section provides instructions on installing placement from Linux
distribution packages.
.. warning:: These installation documents are a work in progress. Some of the
distribution packages mentioned are not yet available so the
instructions **will not work**.
The placement service provides an `HTTP API`_ used to track resource provider
inventories and usages. More detail can be found at the :doc:`placement
overview `.
Placement operates as a web service over a data model. Installation involves
creating the necessary database and installing and configuring the web service.
This is a straightforward process, but there are quite a few steps to integrate
placement with the rest of an OpenStack cloud.
.. note:: Placement is required by some of the other OpenStack services,
notably nova, therefore it should be installed before those other
services but after Identity (keystone).
.. toctree::
:maxdepth: 1
from-pypi.rst
install-rdo.rst
install-ubuntu.rst
verify.rst
.. _HTTP API: https://docs.openstack.org/api-ref/placement/
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/install-rdo.rst 0000664 0000000 0000000 00000006401 15132464062 0025365 0 ustar 00root root 0000000 0000000 Install and configure Placement for Red Hat Enterprise Linux and CentOS Stream
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the placement service
when using Red Hat Enterprise Linux or CentOS Stream packages.
Prerequisites
-------------
Before you install and configure the placement service, you must create
a database, service credentials, and API endpoints.
Create Database
^^^^^^^^^^^^^^^
#. To create the database, complete these steps:
* Use the database access client to connect to the database server
as the ``root`` user:
.. code-block:: console
$ mysql -u root -p
* Create the ``placement`` database:
.. code-block:: console
MariaDB [(none)]> CREATE DATABASE placement;
* Grant proper access to the database:
.. code-block:: console
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
IDENTIFIED BY 'PLACEMENT_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
IDENTIFIED BY 'PLACEMENT_DBPASS';
Replace ``PLACEMENT_DBPASS`` with a suitable password.
* Exit the database access client.
Configure User and Endpoints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. include:: shared/endpoints.rst
Install and configure components
--------------------------------
.. include:: note_configuration_vary_by_distribution.rst
#. Install the packages:
.. code-block:: console
# dnf install openstack-placement-api
#. Edit the ``/etc/placement/placement.conf`` file and complete the following
actions:
* In the ``[placement_database]`` section, configure database access:
.. path /etc/placement/placement.conf
.. code-block:: ini
[placement_database]
# ...
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
Replace ``PLACEMENT_DBPASS`` with the password you chose for the placement
database.
* In the ``[api]`` and ``[keystone_authtoken]`` sections, configure Identity
service access:
.. path /etc/placement/placement.conf
.. code-block:: ini
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS
Replace ``PLACEMENT_PASS`` with the password you chose for the
``placement`` user in the Identity service.
.. note::
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
.. note::
The values of ``username``, ``password``, ``project_domain_name`` and
``user_domain_name`` need to be in sync with your keystone config.
#. Populate the ``placement`` database:
.. code-block:: console
# su -s /bin/sh -c "placement-manage db sync" placement
.. note::
Ignore any deprecation messages in this output.
Finalize installation
---------------------
* Restart the httpd service:
.. code-block:: console
# systemctl restart httpd
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/install-ubuntu.rst 0000664 0000000 0000000 00000006270 15132464062 0026127 0 ustar 00root root 0000000 0000000 Install and configure Placement for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the placement service
when using Ubuntu packages.
Prerequisites
-------------
Before you install and configure the placement service, you must create
a database, service credentials, and API endpoints.
Create Database
^^^^^^^^^^^^^^^
#. To create the database, complete these steps:
* Use the database access client to connect to the database server
as the ``root`` user:
.. code-block:: console
# mysql
* Create the ``placement`` database:
.. code-block:: console
MariaDB [(none)]> CREATE DATABASE placement;
* Grant proper access to the database:
.. code-block:: console
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
IDENTIFIED BY 'PLACEMENT_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
IDENTIFIED BY 'PLACEMENT_DBPASS';
Replace ``PLACEMENT_DBPASS`` with a suitable password.
* Exit the database access client.
Configure User and Endpoints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. include:: shared/endpoints.rst
Install and configure components
--------------------------------
.. include:: note_configuration_vary_by_distribution.rst
#. Install the packages:
.. code-block:: console
# apt install placement-api
#. Edit the ``/etc/placement/placement.conf`` file and complete the following
actions:
* In the ``[placement_database]`` section, configure database access:
.. path /etc/placement/placement.conf
.. code-block:: ini
[placement_database]
# ...
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
Replace ``PLACEMENT_DBPASS`` with the password you chose for the placement
database.
* In the ``[api]`` and ``[keystone_authtoken]`` sections, configure Identity
service access:
.. path /etc/placement/placement.conf
.. code-block:: ini
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS
Replace ``PLACEMENT_PASS`` with the password you chose for the
``placement`` user in the Identity service.
.. note::
Comment out or remove any other options in the ``[keystone_authtoken]``
section.
.. note::
The values of ``username``, ``password``, ``project_domain_name`` and
``user_domain_name`` need to be in sync with your keystone config.
#. Populate the ``placement`` database:
.. code-block:: console
# su -s /bin/sh -c "placement-manage db sync" placement
.. note::
Ignore any deprecation messages in this output.
Finalize installation
---------------------
* Reload the web server to adjust to get new configuration settings for
placement.
.. code-block:: console
# service apache2 restart
note_configuration_vary_by_distribution.rst 0000664 0000000 0000000 00000000463 15132464062 0033306 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install .. note::
Default configuration files vary by distribution. You might need to add
these sections and options rather than modifying existing sections and
options. Also, an ellipsis (``...``) in the configuration snippets indicates
potential default configuration options that you should retain.
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/shared/ 0000775 0000000 0000000 00000000000 15132464062 0023650 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/shared/endpoints.rst 0000664 0000000 0000000 00000011324 15132464062 0026406 0 ustar 00root root 0000000 0000000
#. Source the ``admin`` credentials to gain access to admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. Create a Placement service user using your chosen ``PLACEMENT_PASS``:
.. code-block:: console
$ openstack user create --domain default --password-prompt placement
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | fa742015a6494a949f67629884fc7ec8 |
| name | placement |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
#. Add the Placement user to the service project with the admin role:
.. code-block:: console
$ openstack role add --project service --user placement admin
.. note::
This command provides no output.
#. Create the Placement API entry in the service catalog:
.. code-block:: console
$ openstack service create --name placement \
--description "Placement API" placement
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Placement API |
| enabled | True |
| id | 2d1a27022e6e4185b86adac4444c495f |
| name | placement |
| type | placement |
+-------------+----------------------------------+
#. Create the Placement API service endpoints:
.. note:: Depending on your environment, the URL for the endpoint will vary
by port (possibly 8780 instead of 8778, or no port at all) and
hostname. You are responsible for determining the correct URL.
.. code-block:: console
$ openstack endpoint create --region RegionOne \
placement public http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 2b1b2637908b4137a9c2e0470487cbc0 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 2d1a27022e6e4185b86adac4444c495f |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
$ openstack endpoint create --region RegionOne \
placement internal http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 02bcda9a150a4bd7993ff4879df971ab |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 2d1a27022e6e4185b86adac4444c495f |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
$ openstack endpoint create --region RegionOne \
placement admin http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 3d71177b9e0f406f98cbff198d74b182 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 2d1a27022e6e4185b86adac4444c495f |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/install/verify.rst 0000664 0000000 0000000 00000005143 15132464062 0024443 0 ustar 00root root 0000000 0000000 ===================
Verify Installation
===================
Verify operation of the placement service.
.. note:: You will need to authenticate to the identity service as an
``admin`` before making these calls. There are many different ways
to do this, depending on how your system was set up. If you do not
have an ``admin-openrc`` file, you will have something similar.
#. Source the ``admin`` credentials to gain access to admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. Perform status checks to make sure everything is in order:
.. code-block:: console
$ placement-status upgrade check
+----------------------------------+
| Upgrade Check Results |
+----------------------------------+
| Check: Missing Root Provider IDs |
| Result: Success |
| Details: None |
+----------------------------------+
| Check: Incomplete Consumers |
| Result: Success |
| Details: None |
+----------------------------------+
The output of that command will vary by release.
See :ref:`placement-status upgrade check ` for
details.
#. Run some commands against the placement API:
* Install the `osc-placement`_ plugin:
.. note:: This example uses `PyPI`_ and :ref:`about-pip` but if you are
using distribution packages you can install the package from
their repository. With the move to python3 you will need to
specify **pip3** or install **python3-osc-placement** from
your distribution.
.. code-block:: console
$ pip3 install osc-placement
* List available resource classes and traits:
.. code-block:: console
$ openstack --os-placement-api-version 1.2 resource class list --sort-column name
+----------------------------+
| name |
+----------------------------+
| DISK_GB |
| IPV4_ADDRESS |
| ... |
$ openstack --os-placement-api-version 1.6 trait list --sort-column name
+---------------------------------------+
| name |
+---------------------------------------+
| COMPUTE_DEVICE_TAGGING |
| COMPUTE_NET_ATTACH_INTERFACE |
| ... |
.. _osc-placement: https://docs.openstack.org/osc-placement/latest/
.. _PyPI: https://pypi.org
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/placement-api-microversion-history.rst 0000664 0000000 0000000 00000001251 15132464062 0030420 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
.. _placement-api-microversion-history:
.. include:: ../../placement/rest_api_version_history.rst
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/ 0000775 0000000 0000000 00000000000 15132464062 0022051 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/2023.1/ 0000775 0000000 0000000 00000000000 15132464062 0022576 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/2023.1/approved/ 0000775 0000000 0000000 00000000000 15132464062 0024416 5 ustar 00root root 0000000 0000000 policy-defaults-improvement.rst 0000664 0000000 0000000 00000007154 15132464062 0032547 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/2023.1/approved ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
===========================
Policy Defaults Improvement
===========================
https://blueprints.launchpad.net/placement/+spec/policy-defaults-improvement
This spec is to improve the placement APIs policy as the directions
decided in `RBAC community-wide goal
`_
Problem description
===================
While discussing the new RBAC (scope_type and project admin vs
system admin things) with operators in the Berlin ops meetup and
via emails, and policy popup meetings, we got the feedback that
we need to keep the legacy admin behaviour same as it is otherwise
it is going to be a big breaking change for many of the operators.
Same feedback for scope_type.
- https://etherpad.opendev.org/p/BER-2022-OPS-SRBAC
- https://etherpad.opendev.org/p/rbac-operator-feedback
By considering the feedback, we decided to make all the policy
to be project scoped, release project reader role, and not to
change the legacy admin behaviour.
Use Cases
---------
Ideally most operators should be able to run without modifying policy, as
such we need to have defaults closer to the usage.
Proposed change
===============
The `RBAC community-wide goal
`_
defines all the direction and implementation usage of policy. This proposal
is to implement the phase 1 and phase 2 of the `RBAC community-wide goal
`_
Alternatives
------------
Keep the policy defaults same as it is and expect operators to override
them to behave as per their usage.
Data model impact
-----------------
None
REST API impact
---------------
The placement APIs policy will be modified to add reader roles, scoped to
projects, and keep legacy behaviour same as it is. Most of the policies
will default to the 'admin-or-service' role but we will review every
policy rule default while doing the code change.
Security impact
---------------
Easier to understand policy defaults will help keep the system secure.
Notifications impact
--------------------
None
Other end user impact
---------------------
None
Performance Impact
------------------
None
Other deployer impact
---------------------
None
Developer impact
----------------
New APIs must add policies that follow the new pattern.
Upgrade impact
--------------
The scope_type of all the policy rules will be ``project`` if any
deployment is running with enforce_scope enabled and with system
scope token then they need to use the project scope token.
Also, if any API policy defaults have been modified to ``service``
role only (most of the policies will default to admin-or-service)
then the deployment using such APIs need to override them in policy.yaml
to continue working for them.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
gmann
Feature Liaison
---------------
Feature liaison:
dansmith
Work Items
----------
* Scope all policy to project
* Add project reader role in policy
* Modify policy rule unit tests
Dependencies
============
None
Testing
=======
Modify or add the policy unit tests.
Documentation Impact
====================
API Reference should be kept consistent with any policy changes, in particular
around the default reader role.
References
==========
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - 2023.1
- Introduced
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/index.rst 0000664 0000000 0000000 00000010135 15132464062 0023712 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
=========================
Placement Specifications
=========================
Significant feature developments are tracked in documents called specifications.
From the Train cycle onward, those documents are kept in this section.
Prior to that, Placement specifications were a part of the `Nova Specs`_.
The following specifications represent the stages of design and development of
resource providers and the Placement service. Implementation details may have
changed or be partially complete at this time.
* `Generic Resource Pools `_
* `Compute Node Inventory `_
* `Resource Provider Allocations `_
* `Resource Provider Base Models `_
* `Nested Resource Providers`_
* `Custom Resource Classes `_
* `Scheduler Filters in DB `_
* `Scheduler claiming resources to the Placement API `_
* `The Traits API - Manage Traits with ResourceProvider `_
* `Request Traits During Scheduling`_
* `filter allocation candidates by aggregate membership`_
* `perform granular allocation candidate requests`_
* `inventory and allocation data migration`_ (reshaping provider trees)
* `handle allocation updates in a safe way`_
.. _Nested Resource Providers: http://specs.openstack.org/openstack/nova-specs/specs/queens/approved/nested-resource-providers.html
.. _Request Traits During Scheduling: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/request-traits-in-nova.html
.. _filter allocation candidates by aggregate membership: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/alloc-candidates-member-of.html
.. _perform granular allocation candidate requests: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/granular-resource-requests.html
.. _inventory and allocation data migration: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/reshape-provider-tree.html
.. _handle allocation updates in a safe way: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/add-consumer-generation.html
.. _Nova Specs: http://specs.openstack.org/openstack/nova-specs
Train
-----
Implemented
~~~~~~~~~~~
.. toctree::
:maxdepth: 1
:glob:
train/implemented/*
In Progress
~~~~~~~~~~~
.. toctree::
:maxdepth: 1
:glob:
train/approved/*
Xena
----
Implemented
~~~~~~~~~~~
.. toctree::
:maxdepth: 1
:glob:
xena/implemented/*
In Progress
~~~~~~~~~~~
Yoga
----
Implemented
~~~~~~~~~~~
.. toctree::
:maxdepth: 1
:glob:
yoga/implemented/*
In Progress
~~~~~~~~~~~
Zed
---
Implemented
~~~~~~~~~~~
In Progress
~~~~~~~~~~~
.. toctree::
:maxdepth: 1
:glob:
zed/approved/*
2023.1
------
Implemented
~~~~~~~~~~~
In Progress
~~~~~~~~~~~
.. toctree::
:maxdepth: 1
:glob:
2023.1/approved/*
.. toctree::
:hidden:
template.rst
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/template.rst 0000664 0000000 0000000 00000026507 15132464062 0024430 0 ustar 00root root 0000000 0000000 ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
========================
Example Spec - The title
========================
Include the URL of your story from StoryBoard:
https://storyboard.openstack.org/#!/story/XXXXXXX
Introduction paragraph -- why are we doing anything? A single paragraph of
prose that operators can understand. The title and this first paragraph
should be used as the subject line and body of the commit message
respectively.
Some notes about the spec process:
* Not all blueprints need a spec, start with a story.
* The aim of this document is first to define the problem we need to solve,
and second agree the overall approach to solve that problem.
* This is not intended to be extensive documentation for a new feature.
For example, there is no need to specify the exact configuration changes,
nor the exact details of any DB model changes. But you should still define
that such changes are required, and be clear on how that will affect
upgrades.
* You should aim to get your spec approved before writing your code.
While you are free to write prototypes and code before getting your spec
approved, it's possible that the outcome of the spec review process leads
you towards a fundamentally different solution than you first envisaged.
* But API changes are held to a much higher level of scrutiny.
As soon as an API change merges, we must assume it could be in production
somewhere, and as such, we then need to support that API change forever.
To avoid getting that wrong, we do want lots of details about API changes
up front.
Some notes about using this template:
* Your spec should be in ReSTructured text, like this template.
* Please wrap text at 79 columns.
* The filename in the git repository should start with the StoryBoard story
number. For example: ``2005171-allocation-partitioning.rst``.
* Please do not delete any of the sections in this template. If you have
nothing to say for a whole section, just write: None
* For help with syntax, see http://sphinx-doc.org/rest.html
* To test out your formatting, build the docs using ``tox -e docs`` and see the
generated HTML file in doc/build/html/specs/. The
generated file will have an ``.html`` extension where the original has
``.rst``.
* If you would like to provide a diagram with your spec, ascii diagrams are
often the best choice. http://asciiflow.com/ is a useful tool. If ascii
is insufficient, you have the option to use seqdiag_ or actdiag_.
.. _seqdiag: http://blockdiag.com/en/seqdiag/index.html
.. _actdiag: http://blockdiag.com/en/actdiag/index.html
Problem description
===================
A detailed description of the problem. What problem is this feature
addressing?
Use Cases
---------
What use cases does this address? What impact on actors does this change have?
Ensure you are clear about the actors in each use case: Developer, End User,
Deployer etc.
Proposed change
===============
Here is where you cover the change you propose to make in detail. How do you
propose to solve this problem?
If this is one part of a larger effort make it clear where this piece ends. In
other words, what's the scope of this effort?
At this point, if you would like to get feedback on if the problem and proposed
change fit in placement, you can stop here and post this for review saying:
Posting to get preliminary feedback on the scope of this spec.
Alternatives
------------
What other ways could we do this thing? Why aren't we using those? This doesn't
have to be a full literature review, but it should demonstrate that thought has
been put into why the proposed solution is an appropriate one.
Data model impact
-----------------
Changes which require modifications to the data model often have a wider impact
on the system. The community often has strong opinions on how the data model
should be evolved, from both a functional and performance perspective. It is
therefore important to capture and gain agreement as early as possible on any
proposed changes to the data model.
Questions which need to be addressed by this section include:
* What new data objects and/or database schema changes is this going to
require?
* What database migrations will accompany this change?
* How will the initial set of new data objects be generated? For example if you
need to take into account existing instances, or modify other existing data,
describe how that will work.
API impact
----------
Each API method which is either added or changed should have the following
* Specification for the method
* A description of what the method does suitable for use in user
documentation
* Method type (POST/PUT/GET/DELETE)
* Normal http response code(s)
* Expected error http response code(s)
* A description for each possible error code should be included
describing semantic errors which can cause it such as
inconsistent parameters supplied to the method, or when a
resource is not in an appropriate state for the request to
succeed. Errors caused by syntactic problems covered by the JSON
schema definition do not need to be included.
* URL for the resource
* URL should not include underscores; use hyphens instead.
* Parameters which can be passed via the url
* JSON schema definition for the request body data if allowed
* Field names should use snake_case style, not camelCase or MixedCase
style.
* JSON schema definition for the response body data if any
* Field names should use snake_case style, not camelCase or MixedCase
style.
* Example use case including typical API samples for both data supplied
by the caller and the response
* Discuss any policy changes, and discuss what things a deployer needs to
think about when defining their policy.
Note that the schema should be defined as restrictively as
possible. Parameters which are required should be marked as such and
only under exceptional circumstances should additional parameters
which are not defined in the schema be permitted (eg
additionalProperties should be False).
Reuse of existing predefined parameter types such as regexps for
passwords and user defined names is highly encouraged.
Security impact
---------------
Describe any potential security impact on the system. Some of the items to
consider include:
* Does this change touch sensitive data such as tokens, keys, or user data?
* Does this change alter the API in a way that may impact security, such as
a new way to access sensitive information or a new way to log in?
* Does this change involve cryptography or hashing?
* Does this change require the use of sudo or any elevated privileges?
* Does this change involve using or parsing user-provided data? This could
be directly at the API level or indirectly such as changes to a cache layer.
* Can this change enable a resource exhaustion attack, such as allowing a
single API interaction to consume significant server resources? Some examples
of this include launching subprocesses for each connection, or entity
expansion attacks in XML.
For more detailed guidance, please see the OpenStack Security Guidelines as
a reference (https://wiki.openstack.org/wiki/Security/Guidelines). These
guidelines are a work in progress and are designed to help you identify
security best practices. For further information, feel free to reach out
to the OpenStack Security Group at openstack-security@lists.openstack.org.
Other end user impact
---------------------
Aside from the API, are there other ways a user will interact with this
feature?
* Does this change have an impact on osc-placement? What does the user
interface there look like?
Performance Impact
------------------
Describe any potential performance impact on the system, for example
how often will new code be called, and is there a major change to the calling
pattern of existing code.
Examples of things to consider here include:
* A small change in a utility function or a commonly used decorator can have a
large impact on performance.
* Calls which result in database queries can have a profound impact on
performance when called in critical sections of the code.
* Will the change include any locking, and if so what considerations are there
on holding the lock?
Other deployer impact
---------------------
Discuss things that will affect how you deploy and configure OpenStack
that have not already been mentioned, such as:
* What config options are being added? Should they be more generic than
proposed? Are the default values ones which will work well in real
deployments?
* Is this a change that takes immediate effect after its merged, or is it
something that has to be explicitly enabled?
* If this change is a new binary, how would it be deployed?
* Please state anything that those doing continuous deployment, or those
upgrading from the previous release, need to be aware of. Also describe
any plans to deprecate configuration values or features.
Developer impact
----------------
Discuss things that will affect other developers working on OpenStack.
Upgrade impact
--------------
Describe any potential upgrade impact on the system.
Implementation
==============
Assignee(s)
-----------
Who is leading the writing of the code? Or is this a blueprint where you're
throwing it out there to see who picks it up?
If more than one person is working on the implementation, please designate the
primary author and contact.
Primary assignee:
Other contributors:
Work Items
----------
Work items or tasks -- break the feature up into the things that need to be
done to implement it. Those parts might end up being done by different people,
but we're mostly trying to understand the timeline for implementation.
Dependencies
============
* Include specific references to other specs or stories that this one either
depends on or is related to.
* If this requires new functionality in another project that is not yet used
document that fact.
* Does this feature require any new library dependencies or code otherwise not
included in OpenStack? Or does it depend on a specific version of a library?
Testing
=======
Please discuss the important scenarios that need to be tested, as well as
specific edge cases we should be ensuring work correctly.
Documentation Impact
====================
Which audiences are affected most by this change, and which documentation
titles on docs.openstack.org should be updated because of this change? Don't
repeat details discussed above, but reference them here in the context of
documentation for multiple audiences.
References
==========
Please add any useful references here. You are not required to have any
references. Moreover, this specification should still make sense when your
references are unavailable. Examples of what you could include are:
* Links to mailing list or IRC discussions
* Links to notes from a summit session
* Links to relevant research, if appropriate
* Anything else you feel it is worthwhile to refer to
History
=======
Optional section intended to be used each time the spec is updated to describe
new design, API or any database schema updated. Useful to let the reader
understand how the spec has changed over time.
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* -
- Introduced
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/ 0000775 0000000 0000000 00000000000 15132464062 0023166 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/approved/ 0000775 0000000 0000000 00000000000 15132464062 0025006 5 ustar 00root root 0000000 0000000 2005473-support-consumer-types.rst 0000664 0000000 0000000 00000025341 15132464062 0033017 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/approved ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
========================
Support Consumer Types
========================
Include the URL of your story from StoryBoard:
https://storyboard.openstack.org/#!/story/2005473
This spec aims at providing support for services to model ``consumer types``
in placement. While placement defines a consumer to be an entity consuming
resources from a provider it does not provide a way to identify similar
"types" of consumers and henceforth allow services to group/query them based
on their types. This spec proposes to associate each consumer to a particular
type defined by the service owning the consumer.
Problem description
===================
In today's placement world each allocation posted by a service is against a
provider for a consumer (ex: for an instance or a migration). However a
service may want to distinguish amongst the allocations made against its
various types of consumers (ex: nova may want to fetch allocations against
instances alone). This is currently not possible in placement and hence the
goal is to make placement aware of "types of consumers" for the services.
Use Cases
---------
* Nova using placement as its `quota calculation system`_: Currently this
approach uses the nova_api database to calculate the quota on the "number of
instances". In order for nova to be able to use placement to count the number
of "instance-consumers", there needs to be a way by which we can
differentiate "instance-consumers" from "migration-consumers".
* Ironic wanting to differentiate between "standalone-consumer" versus
"nova-consumer".
Note that it is not within the scope of placement to model the coordination of
the consumer type collisions that may arise between multiple services during
their definition. Placement will also not be able to identify or verify correct
consumer types (eg, INTANCE versus INSTANCE) from the external service's
perspective.
Proposed change
===============
In order to model consumer types in placement, we will add a new
``consumer_types`` table to the placement database which will have two columns:
#. an ``id`` which will be of type integer.
#. a ``name`` which will be of type varchar (maximum of 255 characters) and
this will have a unique constraint on it. The pattern restrictions for the
name will be similar to placement traits and resource class names, i.e
restricted to only ``^[A-Z0-9_]+$`` with length restrictions being {1, 255}.
A sample look of such a table would be:
+--------+----------+
| id | name |
+========+==========+
| 1 | UNKNOWN |
+--------+----------+
| 2 | INSTANCE |
+--------+----------+
| 3 | MIGRATION|
+--------+----------+
A new column called ``consumer_type_id`` would be added to the ``consumers``
table to map the consumer to its type.
The ``POST /allocations`` and ``PUT /allocations/{consumer_uuid}`` REST API's
will gain a new (required) key called ``consumer_type`` which is of type string
in their request body's through which the caller can specify what type of
consumer it is creating or updating the allocations for. If the specified
``consumer_type`` key is not present in the ``consumer_types`` table, a new
entry will be created. Also note that once a consumer type is created, it
lives on forever. If this becomes a problem in the future for the operators
a tool can be provided to clean them up.
In order to maintain parity between the request format of
``PUT /allocations/{consumer_uuid}`` and response format of
``GET /allocations/{consumer_uuid}``, the ``consumer_type`` key will also be
exposed through the response of ``GET /allocations/{consumer_uuid}`` request.
The external services will be able to leverage this ``consumer_type`` key
through the ``GET /usages`` REST API which will have a change in the format
of its request and response. The request will gain a new optional key called
``consumer_type`` which will enable users to query usages based on the consumer
type. The response will group the resource usages by the specified
consumer_type (if consumer_type key is not specified it will return the usages
for all the consumer_types) meaning it will gain a new ``consumer_type`` key.
Per consumer type we will also return a ``consumer_count`` of consumers of that
type.
See the `API impact`_ section for more details on how this would be done.
The above REST API changes and the corresponding changes to the ``/reshaper``
REST API will be available from a new microversion.
The existing consumers in placement would be mapped to a default consumer type
called ``UNKNOWN`` (which will be the default value while creating the model
schema) which means we do not know what type these consumers are and the
service to which the consumers belong needs to update this information
if it wants to make use of the ``consumer_types`` feature.
Alternatives
------------
We could create a new REST API to allow users to create consumer types
explicitly but it does not make sense to add a new API for a non-user facing
feature.
Data model impact
-----------------
The placement database will get a new ``consumer_types`` table that will have
a default consumer type called ``UNKNOWN`` and the ``consumers`` table will
get a new ``consumer_type_id`` column that by default will point to the
``UNKNOWN`` consumer type. The migration is intended to solely be an alembic
migration although a comparison can be done for this versus having a separate
online data migration to update null values to "UNKNOWN" to pick the faster
one.
API impact
----------
The new ``POST /allocations`` request will look like this::
{
"30328d13-e299-4a93-a102-61e4ccabe474": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"consumer_type": "INSTANCE", # This is new
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 2,
"MEMORY_MB": 3
},
"generation": 4
}
}
},
"71921e4e-1629-4c5b-bf8d-338d915d2ef3": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"consumer_type": "MIGRATION", # This is new
"allocations": {}
},
"48c1d40f-45d8-4947-8d46-52b4e1326df8": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"consumer_type": "UNKNOWN", # This is new
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 4,
"MEMORY_MB": 5
},
"generation": 12
}
}
}
}
The new ``PUT /allocations/{consumer_uuid}`` request will look like this::
{
"allocations": {
"4e061c03-611e-4caa-bf26-999dcff4284e": {
"resources": {
"DISK_GB": 20
}
},
"89873422-1373-46e5-b467-f0c5e6acf08f": {
"resources": {
"MEMORY_MB": 1024,
"VCPU": 1
}
}
},
"consumer_generation": 1,
"user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70",
"project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784",
"consumer_type": "INSTANCE" # This is new
}
Note that ``consumer_type`` is a required key for both these requests at
this microversion.
The new ``GET /usages`` response will look like this for a request of type
``GET /usages?project_id=&user_id=`` or
``GET /usages?project_id=`` where the consumer_type key is not
specified::
{
"usages": {
"INSTANCE": {
"consumer_count": 5,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
"MIGRATION": {
"consumer_count": 2,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
"UNKNOWN": {
"consumer_count": 1,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
The new ``GET /usages`` response will look like this for a request of type
``GET /usages?project_id=&user_id=&consumer_type="INSTANCE"``
or ``GET /usages?project_id=&consumer_type="INSTANCE"`` where the
consumer_type key is specified::
{
"usages": {
"INSTANCE": {
"consumer_count": 5,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
A special request of the form
``GET /usages?project_id=&consumer_type=all`` will be allowed to
enable users to query for the total count of all the consumers. The
response for such a request will look like this::
{
"usages": {
"all": {
"consumer_count": 3,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
Note that ``consumer_type`` is an optional key for the ``GET /usages`` request.
The above REST API changes and the corresponding changes to the ``/reshaper``
REST API will be available from a new microversion.
Security impact
---------------
None.
Other end user impact
---------------------
The external services using this feature like nova should take the
responsibility of updating the consumer type of existing consumers
from "UNKNOWN" to the actual type through the
``PUT /allocations/{consumer_uuid}`` REST API.
Performance Impact
------------------
None.
Other deployer impact
---------------------
None.
Developer impact
----------------
None.
Upgrade impact
--------------
The ``placement-manage db sync`` command has to be run by the operators in
order to upgrade the database schema to accommodate the new changes.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
Other contributors:
Work Items
----------
* Add the new ``consumer_types`` table and create a new ``consumer_type_id``
column in the ``consumers`` table with a foreign key constraint to the ``id``
column of the ``consumer_types`` table.
* Make the REST API changes in a new microversion for:
* ``POST /allocations``,
* ``PUT /allocations/{consumer_uuid}``,
* ``GET /allocations/{consumer_uuid}``,
* ``GET /usages`` and
* ``/reshaper``
Dependencies
============
None.
Testing
=======
Unit and functional tests to validate the feature will be added.
Documentation Impact
====================
The placement API reference will be updated to reflect the new changes.
References
==========
.. _quota calculation system: https://review.opendev.org/#/q/topic:bp/count-quota-usage-from-placement
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Train
- Introduced
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/implemented/ 0000775 0000000 0000000 00000000000 15132464062 0025471 5 ustar 00root root 0000000 0000000 2005297-negative-aggregate-membership.rst 0000664 0000000 0000000 00000032724 15132464062 0034661 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
===================================================
Support filtering by forbidden aggregate membership
===================================================
https://storyboard.openstack.org/#!/story/2005297
This blueprint proposes to add support for negative filtering by the underlying
resource provider's aggregate membership.
Problem description
===================
Placement currently supports ``member_of`` query parameters for the
``GET /resource_providers`` and ``GET /allocation_candidates`` endpoints.
This parameter is either "a string representing an aggregate uuid" or "the
prefix ``in:`` followed by a comma-separated list of strings representing
aggregate uuids".
For example::
&member_of=in:<agg1>,<agg2>&member_of=<agg3>
would translate logically to:
"Candidate resource providers should be in either agg1 or agg2, but definitely
in agg3." (See `alloc-candidates-member-of`_ spec for details)
However, there is no expression for forbidden aggregates in the API. In other
words, we have no way to say "don't use resource providers in this special
aggregate for non-special workloads".
Use Cases
---------
This feature is useful to save special resources for specific users.
Use Case 1
~~~~~~~~~~
Some of the compute hosts are *Licensed Windows Compute Hosts*, meaning any VMs
booted on these compute hosts will be considered as licensed Windows images and
depending on the usage of VM, operator will charge it to the end-users.
As an operator, I want to avoid booting images/volumes other than Windows OS
on *Licensed Windows Compute Host*.
Use Case 2
~~~~~~~~~~
Reservation projects like blazar would like to have its own aggregate for
host reservation in order to have consumers without any reservations to be
scheduled outside of that aggregate in order to save the reserved resources.
Proposed change
===============
Adjust the handling of the ``member_of`` parameter so that aggregates can be
expressed as forbidden. Forbidden aggregates are prefixed with a ``!``.
In the following example::
&member_of=!<agg1>
would translate logically to:
"Candidate resource providers should *not* be in agg1"
This negative expression can also be used in multiple ``member_of``
parameters::
&member_of=in:<agg1>,<agg2>&member_of=<agg3>&member_of=!<agg4>
would translate logically to:
"Candidate resource providers must be at least one of agg1 or agg2,
definitely in agg3 and definitely *not* in agg4."
Note that we don't support ``!`` for arguments to the ``in:`` prefix::
&member_of=in:<agg1>,<agg2>,!<agg3>
This would result in HTTP 400 Bad Request error.
Instead, we support ``!in:`` prefix::
&member_of=!in:<agg1>,<agg2>,<agg3>
which is equivalent to::
member_of=!<agg1>&member_of=!<agg2>&member_of=!<agg3>
Nested resource providers
-------------------------
For nested resource providers, an aggregate on a root provider automatically
spans the whole tree. When a root provider is in forbidden aggregates, the
child providers can't be a candidate even if the child provider belongs to no
(or another different) aggregate.
In the following environments, for example,
.. code::
+-----------------------+
| sharing storage (ss1) |
| agg: [aggB] |
+-----------+-----------+
| aggB
+------------------------------+ +--------------|--------------+
| +--------------------------+ | | +------------+------------+ |
| | compute node (cn1) | | | |compute node (cn2) | |
| | agg: [aggA] | | | | agg: [aggB] | |
| +-----+-------------+------+ | | +----+-------------+------+ |
| | parent | parent | | | parent | parent |
| +-----+------+ +----+------+ | | +----+------+ +----+------+ |
| | numa1_1 | | numa1_2 | | | | numa2_1 | | numa2_2 | |
| | agg:[aggC]| | agg:[] | | | | agg:[] | | agg:[] | |
| +-----+------+ +-----------+ | | +-----------+ +-----------+ |
+-------|----------------------+ +-----------------------------+
| aggC
+-----+-----------------+
| sharing storage (ss2) |
| agg: [aggC] |
+-----------------------+
the exclusion constraint is as follows:
* ``member_of=!<aggA>`` excludes "cn1", "numa1_1" and "numa1_2".
* ``member_of=!<aggB>`` excludes "cn2", "numa2_1", "numa2_2", and "ss1".
* ``member_of=!<aggC>`` excludes "numa1_1" and "ss2".
Note that this spanning doesn't happen on numbered ``member_of`` parameters,
which is used for the granular request:
* ``member_of<N>=!<aggA>`` excludes "cn1"
* ``member_of<N>=!<aggB>`` excludes "cn2" and "ss1"
* ``member_of<N>=!<aggC>`` excludes "numa1_1" and "ss2".
See `granular-resource-request`_ spec for details.
Alternatives
------------
We can use forbidden traits to exclude specific resource providers, but if we
use traits, then we should put Blazar or windows license trait not only on
root providers but also on every resource providers in the tree, so we don't
take this way.
We can also create nova scheduler filters to do post-processing of compute
hosts by looking at host aggregate relationships just as `BlazarFilter`_
does today. However, this is inefficient and we don't want to develop/use
another filter for the windows license use case.
Data model impact
-----------------
None.
REST API impact
---------------
A new microversion will be created which will update the validation for the
``member_of`` parameter on ``GET /allocation_candidates`` and ``GET
/resource_providers`` to accept ``!`` both as a prefix on aggregate uuids and
as a prefix to the ``in:`` prefix to express that the prefixed aggregate (or
the aggregates) is to be excluded from the results.
We do not return 400 if an agg UUID is found on both the positive and negative
sides of the request. For example::
&member_of=in:<agg1>,<agg2>&member_of=!<agg2>
The first member_of would return all resource_providers in either agg1 or agg2,
while the second member_of would eliminate those in agg2. The result will be a
200 containing just those resource_providers in agg1. Likewise, we do not
return 400 for cases like::
&member_of=<agg1>&member_of=!<agg1>
As in the previous example, we return 200 with empty results, since this is a
syntactically valid request, even though a resource provider cannot be both
inside and outside of agg1 at the same time.
Security impact
---------------
None.
Notifications impact
--------------------
None.
Other end user impact
---------------------
None.
Performance Impact
------------------
Queries to the database will see a moderate increase in complexity but existing
table indexes should handle this with aplomb.
Other deployer impact
---------------------
None.
Developer impact
----------------
This helps us to develop a simple reservation mechanism without having a
specific nova filter, for example, via the following flow:
0. Operator who wants to enable blazar sets default forbidden and required
membership key in the ``nova.conf``.
* The parameter key in the configuration file is something like
``[scheduler]/placement_req_default_forbidden_member_prefix`` and the
value is set by the operator to ``reservation:``.
* The parameter key in the configuration file is something like
``[scheduler]/placement_req_required_member_prefix`` and the value
is set by the operator to ``reservation:``.
1. Operator starts up the service and makes a host-pool for reservation via
blazar API
* Blazar makes a nova aggregate with ``reservation:`` metadata
on initialization as a blazar's free pool
* Blazar puts hosts specified by the operator into the free pool aggregate
on demand
2. User uses blazar to make a host reservation and to get the reservation id
* Blazar picks up a host from the blazar's free pool
* Blazar creates a new nova aggregate for that reservation and sets that
aggregate's metadata key to ``reservation:<reservation id>`` and puts the
reserved host into that aggregate
3. User creates a VM with a flavor/image with ``reservation:<reservation id>``
meta_data/extra_specs to consume the reservation
* Nova finds in the flavor that the extra_spec has a key which starts with
what is set in ``[scheduler]/placement_req_required_member_prefix``,
and looks up the table for aggregates which has the specified metadata::
required_prefix = CONF.scheduler.placement_req_required_member_prefix
# required_prefix = 'reservation:'
required_meta_data = get_flavor_extra_spec_starts_with(required_prefix)
# required_meta_data = 'reservation:<reservation id>'
required_aggs = aggs_whose_metadata_is(required_meta_data)
# required_aggs = [<the reservation aggregate>]
* Nova finds out that the default forbidden aggregate metadata prefix,
which is set in
``[scheduler]/placement_req_default_forbidden_member_prefix``, is
explicitly specified via the flavor, so this step is skipped::
default_forbidden_prefix = CONF.scheduler.placement_req_default_forbidden_member_prefix
# default_forbidden_prefix = ['reservation:']
forbidden_aggs = set()
if not get_flavor_extra_spec_starts_with(default_forbidden_prefix):
# this is skipped because 'reservation:' is in the flavor in this case
forbidden_aggs = aggs_whose_metadata_starts_with(default_forbidden_prefix)
* Nova calls placement with required and forbidden aggregates::
# We don't have forbidden aggregates in this case
?member_of=<agg uuid of the reservation aggregate>
4. User creates a VM with a flavor/image with no reservation, that is,
without ``reservation:`` meta_data/extra_specs.
* Nova finds in the flavor that the extra_spec has no key which starts with
what is set in ``[scheduler]/placement_req_required_member_prefix``,
so no required aggregate is obtained::
required_prefix = CONF.scheduler.placement_req_required_member_prefix
# required_prefix = 'reservation:'
required_meta_data = get_flavor_extra_spec_starts_with(required_prefix)
# required_meta_data = ''
required_aggs = aggs_whose_metadata_is(required_meta_data)
# required_aggs = set()
* Nova looks up the table for default forbidden aggregates whose metadata
starts with what is set in
``[scheduler]/placement_req_default_forbidden_member_prefix``::
default_forbidden_prefix = CONF.scheduler.placement_req_default_forbidden_member_prefix
# default_forbidden_prefix = ['reservation:']
forbidden_aggs = set()
if not get_flavor_extra_spec_starts_with(default_forbidden_prefix):
# This is not skipped now
forbidden_aggs = aggs_whose_metadata_starts_with(default_forbidden_prefix)
# forbidden_aggs = <aggregates whose metadata starts with 'reservation:'>
* Nova calls placement with required and forbidden aggregates::
# We don't have required aggregates in this case
?member_of=!in:<agg uuids of the forbidden aggregates>
Note that the change in the nova configuration file and change in the request
filter is an example and out of the scope of this spec. An alternative for this
is to let placement be aware of the default forbidden traits/aggregates (See
the `Bi-directional enforcement of traits`_ spec). But we agreed that it is not
placement but nova which is responsible for what traits/aggregate is
forbidden/required for the instance.
Upgrade impact
--------------
None.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
Tetsuro Nakamura (nakamura.tetsuro@lab.ntt.co.jp)
Work Items
----------
* Update the ``ResourceProviderList.get_all_by_filters`` and
``AllocationCandidates.get_by_requests`` methods to change the database
queries to filter on "not this aggregate".
* Update the placement API handlers for ``GET /resource_providers`` and ``GET
/allocation_candidates`` in a new microversion to pass the negative
aggregates to the methods changed in the steps above, including input
validation adjustments.
* Add functional tests of the modified database queries.
* Add gabbi tests that express the new queries, both successful queries and
those that should cause a 400 response.
* Release note for the API change.
* Update the microversion documents to indicate the new version.
* Update placement-api-ref to show the new query handling.
Dependencies
============
None.
Testing
=======
Normal functional and unit testing.
Documentation Impact
====================
Document the REST API microversion in the appropriate reference docs.
References
==========
* `alloc-candidates-member-of`_ feature
* `granular-resource-request`_ feature
.. _`alloc-candidates-member-of`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/alloc-candidates-member-of.html
.. _`granular-resource-request`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/granular-resource-requests.html
.. _`BlazarFilter`: https://github.com/openstack/blazar-nova/tree/stable/rocky/blazarnova/scheduler/filters
.. _`Bi-directional enforcement of traits`: https://review.opendev.org/#/c/593475/
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Stein
- Approved but not implemented
* - Train
- Reproposed
2005575-nested-magic-1.rst 0000664 0000000 0000000 00000062275 15132464062 0031503 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
===================================
Getting On The Nested Magic Train 1
===================================
https://storyboard.openstack.org/#!/story/2005575
This spec describes a cluster of Placement API work to support several
interrelated use cases for Train around:
* Modeling complex trees such as NUMA layouts, multiple devices, networks.
* Requesting affinity [#]_ between/among the various providers/allocations in
allocation candidates against such layouts.
* Describing granular groups more richly to facilitate the above.
* Requesting candidates based on traits that are not necessarily associated
with resources.
An additional spec, for a feature known as `can_split`_ has been separated out
to its own spec to ensure that any delay in it does not impact these features,
which are less controversial.
.. [#] The kind of affinity we're talking about is best understood by
referring to the use case for the `same_subtree`_ feature below.
Principles
==========
In developing this design, some fundamental concepts have come to light. These
are not really changes from the existing architecture, but understanding them
becomes more important in light of the changes introduced herein.
Resource versus Provider Traits
-------------------------------
The database model associates traits with resource providers, not with
inventories of resource classes. However, conceptually there are two different
categories of traits to consider.
.. _`resource traits`:
**Resource Traits** are tied to specific resources. For example,
``HW_CPU_X86_AVX2`` describes a characteristic of ``VCPU`` (or ``PCPU``)
resources.
.. _`provider traits`:
**Provider Traits** are characteristics of a provider, regardless of the
resources it provides. For example, ``COMPUTE_VOLUME_MULTI_ATTACH`` is a
capability of a compute host, not of any specific resource inventory.
``HW_NUMA_ROOT`` describes NUMA affinity among *all* the resources in the
inventories of that provider *and* all its descendants.
``CUSTOM_PHYSNET_PUBLIC`` indicates connectivity to the ``public`` network,
regardless of whether the associated resources are ``VF``, ``PF``, ``VNIC``,
etc.; and regardless of whether those resources reside on the provider marked
with the trait or on its descendants.
This distinction becomes important when deciding how to model. **Resource
traits** need to "follow" their resource class. For example,
``HW_CPU_X86_AVX2`` should be on the provider of ``VCPU`` (or ``PCPU``)
resource, whether that's the root or a NUMA child. On the other hand,
**provider traits** must stick to their provider, regardless of where resources
inventories are placed. For example, ``COMPUTE_VOLUME_MULTI_ATTACH`` should
always be on the root provider, as the root provider conceptually represents
"the compute host".
.. _`Traits Flow Down`:
**Alternative: "Traits Flow Down":** There have_ been_ discussions_ around a
provider implicitly inheriting the traits of its parent (and therefore all its
ancestors). This would (mostly) allow us not to think about the distinction
between "resource" and "provider" traits. We ultimately decided against this by
a hair, mainly because of this:
It makes no sense to say my PGPU is capable of MULTI_ATTACH
In addition, IIUC, there are SmartNICs [1] that have CPUs on cards.
If someone will want to report/model those CPUs in placement, they
will be scared that CPU traits on compute side flow down to those
CPUs on NIC despite they are totally different CPUs.
[1] https://www.netronome.com/products/smartnic/overview/
...and because we were able to come up with other satisfactory solutions to our
use cases.
.. _have: http://lists.openstack.org/pipermail/openstack-discuss/2019-April/005201.html
.. _been: http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004817.html
.. _discussions: https://review.opendev.org/#/c/662191/3/doc/source/specs/train/approved/2005575-nested-magic-1.rst@266
Group-Specific versus Request-Wide Query Parameters
---------------------------------------------------
`granular resource requests`_ introduced a divide between ``GET
/allocation_candidates`` query parameters which apply to a particular request
group
* resources[$S]
* required[$S]
* member_of[$S]
* in_tree[$S]
.. _`request-wide`:
...and those which apply to the request as a whole
* limit
* group_policy
This has been fairly obvious thus far; but this spec introduces concepts (such
as `root_required`_ and `same_subtree`_) that make it important to keep this
distinction in mind. Moving forward, we should consider whether new features
and syntax additions make more sense to be group-specific or request-wide.
.. _`granular resource requests`: http://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/granular-resource-requests.html
Proposed change
===============
All changes are to the ``GET /allocation_candidates`` operation via new
microversions, one per feature described below.
arbitrary group suffixes
------------------------
**Use case:** Client code managing request groups for different kinds of
resources - which will often come from different providers - may reside in
different places in the codebase. For example, the management of compute
resources vs. networks vs. accelerators. However, there still needs to be a way
for the consuming code to express relationships (such as affinity) among these
request groups. For this purpose, API consumers wish to be able to use
conventions for request group identifiers. It would also be nice for
development and debugging purposes if these designations had some element of
human readability.
(Merged) code is here: https://review.opendev.org/#/c/657419/
Granular groups are currently restricted to using integer suffixes. We will
change this so they can be case-sensitive strings up to 64 characters long
comprising alphanumeric (either case), underscore, and hyphen.
* 64c so we can fit a stringified UUID (with hyphens) as well as some kind of
handy type designation. Like ``resources_PORT_$UUID``.
https://review.opendev.org/#/c/657419/4/placement/schemas/allocation_candidate.py@19
* We want to allow uppercase so consumers can make nice visual distinctions
like ``resources_PORT...``; we want to allow lowercase because openstack
consumers tend to use lowercase UUIDs and this makes them not have to convert
them. Placement will use the string in the form it is given and transform
it neither on input nor output. If the form does not match constraints a
``400`` response will be returned.
https://review.opendev.org/#/c/657419/4/placement/schemas/allocation_candidate.py@19
* **Alternative** Uppercase only so we don't have to worry about case
sensitivity or confusing differentiation from the prefixes (which are
lowercase). **Rejected** because we prefer allowing lowercase UUIDs, and are
willing to give the consumer the rope.
https://review.opendev.org/#/c/657419/1/placement/lib.py@31
* Hyphens so we can use UUIDs without too much scrubbing.
For purposes of documentation (and this spec), we'll rename the "unnumbered"
group to "unspecified" or "unsuffixed", and anywhere we reference "numbered"
groups we can call them "suffixed" or "granular" (I think this label is already
used in some places).
same_subtree
------------
**Use case:** I want to express affinity between/among allocations in separate
request groups. For example, that a ``VGPU`` come from a GPU affined to the
NUMA node that provides my ``VCPU`` and ``MEMORY_MB``; or that multiple network
``VF``\ s come from the same NIC.
A new ``same_subtree`` query parameter will be accepted. The value is a
comma-separated list of request group suffix strings ``$S``. Each must exactly
match a suffix on a granular group somewhere else in the request. Importantly,
the identified request groups need not have a ``resources$S`` (see
`resourceless request groups`_).
We define "same subtree" as "all of the resource providers satisfying the
request group must be rooted at one of the resource providers satisfying the
request group". Or put another way: "one of the resource providers satisfying
the request group must be the direct ancestor of all the other resource
providers satisfying the request group".
For example, given a model like::
+--------------+
| compute node |
+-------+------+
|
+---------+----------+
| |
+---------+--------+ +---------+--------+
| numa0 | | numa1 |
| VCPU: 4 (2 used) | | VCPU: 4 |
| MEMORY_MB: 2048 | | MEMORY_MB: 2048 |
+---+--------------+ +---+----------+---+
| | |
+---+----+ +---+---+ +---+---+
|fpga0_0 | |fpga1_0| |fpga1_1|
|FPGA:1 | |FPGA:1 | |FPGA:1 |
+--------+ +-------+ +-------+
to request "two VCPUs, 512MB of memory, and one FPGA from the same NUMA
node," my request could include::
?resources_COMPUTE=VCPU:2,MEMORY_MB:512
&resources_ACCEL=FPGA:1
# NOTE: The suffixes include the leading underscore!
&same_subtree=_COMPUTE,_ACCEL
This will produce candidates including::
- numa0: {VCPU:2, MEMORY_MB:512}, fpga0_0: {FPGA:1}
- numa1: {VCPU:2, MEMORY_MB:512}, fpga1_0: {FPGA:1}
- numa1: {VCPU:2, MEMORY_MB:512}, fpga1_1: {FPGA:1}
but *not*::
- numa0: {VCPU:2, MEMORY_MB:512}, fpga1_0: {FPGA:1}
- numa0: {VCPU:2, MEMORY_MB:512}, fpga1_1: {FPGA:1}
- numa1: {VCPU:2, MEMORY_MB:512}, fpga0_0: {FPGA:1}
The ``same_subtree`` query parameter is `request-wide`_, but may be repeated.
Each grouping is treated independently.
Anti-affinity
~~~~~~~~~~~~~
There were discussions about supporting ``!`` syntax in ``same_subtree`` to
express anti-affinity (e.g. ``same_subtree=$X,!$Y`` meaning "resources from
group ``$Y`` shall *not* come from the same subtree as resources from group
``$X``"). This shall be deferred to a future release.
resourceless request groups
---------------------------
**Use case:** When making use of `same_subtree`_, I want to be able to
identify a provider as a placeholder in the subtree structure even if I don't
need any resources from that provider.
It is currently a requirement that a ``resources$S`` exist for all ``$S`` in a
request. This restriction shall be removed such that a request group may exist
e.g. with only ``required$S`` or ``member_of$S``.
There must be at least one ``resources`` or ``resources$S`` somewhere in the
request, otherwise there will be no inventory to allocate and thus no
allocation candidates. If neither is present a ``400`` response will be
returned.
Furthermore, resourceless request groups must be used with `same_subtree`_.
That is, the suffix for each resourceless request group must feature in a
``same_subtree`` somewhere in the request. Otherwise a ``400`` response will be
returned. (The reasoning for this restriction_ is explained below.)
For example, given a model like::
+--------------+
| compute node |
+-------+------+
|
+-----------+-----------+
| |
+-----+-----+ +-----+-----+
|nic1 | |nic2 |
|HW_NIC_ROOT| |HW_NIC_ROOT|
+-----+-----+ +-----+-----+
| |
+----+----+ +-----+---+
| | | |
+--+--+ +--+--+ +--+--+ +--+--+
|pf1_1| |pf1_2| |pf2_1| |pf2_2|
|NET1 | |NET2 | |NET1 | |NET2 |
|VF:4 | |VF:4 | |VF:2 | |VF:2 |
+-----+ +-----+ +-----+ +-----+
a request such as the following, meaning, "Two VFs from the same NIC,
one on each of network NET1 and NET2," is legal::
?resources_VIF_NET1=VF:1
&required_VIF_NET1=NET1
&resources_VIF_NET2=VF:1
&required_VIF_NET2=NET2
# NOTE: there is no resources_NIC_AFFINITY
&required_NIC_AFFINITY=HW_NIC_ROOT
&same_subtree=_VIF_NET1,_VIF_NET2,_NIC_AFFINITY
The returned candidates will include::
- pf1_1: {VF:1}, pf1_2: {VF:1}
- pf2_1: {VF:1}, pf2_2: {VF:1}
but *not*::
- pf1_1: {VF:1}, pf2_2: {VF:1}
- pf2_1: {VF:1}, pf1_2: {VF:1}
.. _restriction:
Why enforce resourceless + same_subtree?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Taken by itself (without `same_subtree`_), a resourceless request group
intuitively means, "There must exist in the solution space a resource provider
that satisfies these constraints." But what does "solution space" mean? Clearly
it's not the same as `solution path`_, or we wouldn't be able to use it to add
resourceless providers to that solution path. So it must encompass at least the
entire non-sharing tree around the solution path. Does it also encompass
sharing providers associated via aggregate? What would that mean?
Since we have not identified any real use cases for resourceless *without*
`same_subtree`_ (other than `root_member_of`_ -- see below) making this an
error allows us to not have to deal with these questions.
root_required
-------------
**Use case:** I want to limit allocation candidates to trees `whose root
provider`_ has (or does not have) certain traits. For example, I want to limit
candidates to only multi-attach-capable hosts; or preserve my Windows-licensed
hosts for special use.
A new ``root_required`` query parameter will be accepted. The value syntax is
identical to that of ``required[$S]``: that is, it accepts a comma-delimited
list of trait names, each optionally prefixed with ``!`` to indicate
"forbidden" rather than "required".
This is a `request-wide`_ query parameter designed for `provider traits`_
specifically on the root provider of the non-sharing tree involved in the
allocation candidate. That is, regardless of any group-specific constraints,
and regardless of whether the root actually provides resource to the request,
results will be filtered such that the root of the non-sharing tree conforms to
the constraints specified in ``root_required``.
``root_required`` may not be repeated.
.. _`whose root provider`:
The fact that this feature is (somewhat awkwardly) restricted to "...trees
whose root provider ..." deserves some explanation. This is to fill a gap
in use cases that cannot be adequately covered by other query parameters.
* To land on a tree (host) with a given trait *anywhere* in its hierarchy,
`resourceless request groups`_ without `same_subtree`_ could be used.
However, there is no way to express the "forbidden" side of this in a way
that makes sense:
* A resourceless ``required$S=!FOO`` would simply ensure that a provider
*anywhere in the tree* does not have ``FOO`` - which would end up not being
restrictive as intended in most cases.
* We could define "resourceless forbidden" to mean "nowhere in the tree", but
this would be inconsistent and hard to explain.
* To ensure that the desired trait is present (or absent) in the *result set*,
it would be necessary to attach the trait to a group whose resource
constraints will be satisfied by the provider possessing (or lacking) that
trait.
* This requires the API consumer to understand too much about how the
provider trees are modeled; and
* It doesn't work in heterogeneous environments where such `provider traits`_
may or may not stick with providers of a specific resource class.
This could possibly be mitigated by careful use of `same_subtree`_, but
that again requires deep understanding of the tree model, and also confuses
the meaning of `same_subtree`_ and `resource versus provider traits`_.
* The `traits flow down`_ concept described earlier could help here; but that
would still entail attaching `provider traits`_ to a particular request
group. Which one? Because the trait isn't associated with a specific
resource, it would be arbitrary and thus difficult to explain and justify.
.. _`solution path`:
**Alternative: "Solution Path":** A more general solution was discussed whereby
we would define a "solution path" as: **The set of resource providers which
satisfy all the request groups *plus* all the ancestors of those providers, up
to the root.** This would allow us to introduce a `request-wide`_ query
parameter such as ``solution_path_required``. The idea would be the same as
``root_required``, but the specified trait constraints would be applied to all
providers in the "solution path" (required traits must be present *somewhere*
in the solution path; forbidden traits must not be present *anywhere* in the
solution path).
This alternative was rejected because:
* Describing the "solution path" concept to API consumers would be hard.
* We decided the only real use cases where the trait constraints needed to be
applied to providers *other than the root* could be satisfied (and more
naturally) in other ways.
This section was the result of long discussions `in IRC`_ and on `the review
for this spec`_
.. _`in IRC`: http://eavesdrop.openstack.org/irclogs/%23openstack-placement/%23openstack-placement.2019-06-12.log.html#t2019-06-12T15:04:48
.. _`the review for this spec`: https://review.opendev.org/#/c/662191/
root_member_of
--------------
.. note:: When this spec was initially written it was not clear whether there
was immediate need to implement this feature. This turned out to be
the case. The feature was not implemented in the Train cycle. It will
be revisited in the future if needed.
**Use case:** I want to limit allocation candidates to trees `whose root
provider`_ is (or is not) a member of a certain aggregate. For example, I want
to limit candidates to only hosts in (or not in) a specific availability zone.
.. note:: We "need" this because of the restriction_ that resourceless request
groups must be used with `same_subtree`_. Without that restriction, a
resourceless ``member_of`` would match a provider anywhere in the
tree, including the root.
``root_member_of`` is conceptually identical to `root_required`_, but for
aggregates. Like ``member_of[$S]``, ``root_member_of`` supports ``in:``, and
can be repeated (in contrast to ``[root_]required[$S]``).
Default group_policy to none
----------------------------
A single ``isolate`` setting that applies to the whole request has consistently
been shown to be inadequate/confusing/frustrating for all but the simplest
anti-affinity use cases. We're not going to get rid of ``group_policy``, but
we're going to make it no longer required, defaulting to ``none``. This will
allow us to get rid of `at least one hack`_ in nova and provide a clearer user
experience, while still allowing us to satisfy simple NUMA use cases. In the
future a `granular isolation`_ syntax should make it possible to satisfy more
complex scenarios.
.. _at least one hack: https://review.opendev.org/657796
.. _granular isolation:
(Future) Granular Isolation
---------------------------
.. note:: This is currently out of scope, but we wanted to get it written down.
The features elsewhere in this spec allow us to specify affinity pretty richly.
But anti-affinity (within a provider tree - not between providers) is still all
(``group_policy=isolate``) or nothing (``group_policy=none``). We would like to
be able to express anti-affinity between/among subsets of the suffixed groups
in the request.
We propose a new `request-wide`_ query parameter key ``isolate``. The value is
a comma-separated list of request group suffix strings ``$S``. Each must
exactly match a suffix on a granular group somewhere else in the request. This
works on `resourceless request groups`_ as well as those with resources. It is
mutually exclusive with the ``group_policy`` query parameter: 400 if both are
specified.
The effect is the resource providers satisfying each group ``$S`` must satisfy
*only* their respective group ``$S``.
At one point I thought it made sense for ``isolate`` to be repeatable. But now
I can't convince myself that ``isolate={set1}&isolate={set2}`` can ever produce
an effect different from ``isolate={set1|set2}``. Perhaps it's because
different ``isolate``\ s could be coming from different parts of the calling
code?
Another alternative would be to isolate the groups from *each other* but not
from *other groups*, in which case repeating ``isolate`` could be meaningful.
But confusing. Thought will be needed.
Interactions
------------
Some discussion on these can be found in the neighborhood of
http://eavesdrop.openstack.org/irclogs/%23openstack-placement/%23openstack-placement.2019-05-10.log.html#t2019-05-10T22:02:43
group_policy + same_subtree
~~~~~~~~~~~~~~~~~~~~~~~~~~~
``group_policy=isolate`` forces the request groups identified in
``same_subtree`` to be satisfied by different providers, whereas
``group_policy=none`` would also allow ``same_subtree`` to degenerate to
"same provider".
For example, given the following model::
+--------------+
| compute node |
+-------+------+
|
+-----------+-----------+
| |
+-----+-----+ +-----+-----+
|nic1 | |nic2 |
|HW_NIC_ROOT| |HW_NIC_ROOT|
+-----+-----+ +-----+-----+
| |
+----+----+ ...
| |
+--+--+ +--+--+
|pf1_1| |pf1_2|
|VF:4 | |VF:4 |
+-----+ +-----+
a request for "Two VFs from different PFs on the same NIC"::
?resources_VIF1=VF:1
&resources_VIF2=VF:1
&required_NIC_AFFINITY=HW_NIC_ROOT
&same_subtree=_VIF1,_VIF2,_NIC_AFFINITY
&group_policy=isolate
will return only one candidate::
- pf1_1: {VF:1}, pf1_2: {VF:1}
whereas the same request with ``group_policy=none``, meaning "Two VFs
from the same NIC"::
?resources_VIF1=VF:1
&resources_VIF2=VF:1
&required_NIC_AFFINITY=HW_NIC_ROOT
&same_subtree=_VIF1,_VIF2,_NIC_AFFINITY
&group_policy=none
will return two additional candidates where both ``VF``\ s are satisfied by
the same provider::
- pf1_1: {VF:1}, pf1_2: {VF:1}
- pf1_1: {VF:2}
- pf1_2: {VF:2}
group_policy + resourceless request groups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Resourceless request groups are treated the same as any other for the
purposes of ``group_policy``:
* If your resourceless request group is suffixed,
``group_policy=isolate`` means the provider satisfying the resourceless
request group will not be able to satisfy any other suffixed group.
* If your resourceless request group is unsuffixed, it can be satisfied by
*any* provider in the tree, since the unsuffixed group isn't isolated (even
with ``group_policy=isolate``). This is important because there are_ cases_
where we want to require certain traits (usually `provider traits`_), and
don't want to figure out which other request group might be requesting
resources from the same provider.
same_subtree + resourceless request groups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These *must* be used together -- see `Why enforce resourceless +
same_subtree?`_
Impacts
=======
Data model impact
-----------------
There should be no changes to database table definitions, but the
implementation will almost certainly involve adding/changing database queries.
There will also likely be changes to python-side objects representing
meta-objects used to manage information between the database and the REST
layer. However, the data models for the JSON payloads in the REST layer itself
will be unaffected.
Performance Impact
------------------
The work for ``same_subtree`` will probably (at least initially) be done on the
python side as additional filtering under ``_merge_candidates``. This could
have some performance impact especially on large data sets. Again, we should
optimize requests without ``same_subtree``, where ``same_subtree`` refers to
only one group, where no nested providers exist in the database, etc.
Resourceless request groups may add a small additional burden to
database queries, but it should be negligible. It should be relatively
rare in the wild for a resourceless request group to be satisfied by a
provider that actually provides no resource to the request, though there
are_ cases_ where a resourceless request group would be useful even
though the provider *does* provide resources to the request.
.. _are: https://review.opendev.org/#/c/645316/
.. _cases: https://review.opendev.org/#/c/656885/
Documentation Impact
--------------------
The new query parameters will be documented in the API reference.
Microversion paperwork will be done.
:doc:`/user/provider-tree` will be updated (and/or split off of).
Security impact
---------------
None
Other end user impact
---------------------
None
Other deployer impact
---------------------
None
Developer impact
----------------
None
Upgrade impact
--------------
None
Implementation
==============
Assignee(s)
-----------
* cdent
* tetsuro
* efried
* others
Dependencies
============
None
Testing
=======
Code for a gabbi fixture with some complex and interesting characteristics is
merged here: https://review.opendev.org/#/c/657463/
Lots of functional testing, primarily via gabbi, will be included.
It wouldn't be insane to write some PoC consuming code on the nova side to
validate assumptions and use cases.
References
==========
...are inline
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Train
- Introduced
.. _can_split: https://review.opendev.org/658510
placement-resource-provider-request-group-mapping-in-allocation-candidates.rst 0000664 0000000 0000000 00000044232 15132464062 0044673 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/train/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
==========================================================================
Provide resource provider - request group mapping in allocation candidates
==========================================================================
https://blueprints.launchpad.net/nova/+spec/placement-resource-provider-request-group-mapping-in-allocation-candidates
To support QoS minimum bandwidth policy during server scheduling Neutron needs
to know which resource provider provides the bandwidth resource for each port
in the server create request. Similar needs arise in case of handling VGPUs
and accelerator devices.
Problem description
===================
Placement supports granular request groups in the ``GET allocation_candidates``
query but the returned allocation candidates do not contain explicit
information about which granular request group is fulfilled by which RP in the
candidate. For example the resource request of a Neutron port is mapped to a
granular request group by Nova towards Placement during scheduling. After
scheduling Neutron needs the information about which port got allocation from
which RP to set up the proper port binding towards those network device RPs.
Similar examples can be created with VGPU and accelerator devices.
Doing this mapping in Nova is possible (see the `current implementation`_) but
scales pretty badly even for small amount of ports in a single server create
request. See the `Non-scalable Nova based solution`_ section with detailed
examples and analysis.
On the other hand when Placement builds an allocation candidate it does that by
`building allocations for each granular request group`_. Therefore Placement
could include the necessary mapping information in the response with
significantly less effort.
So doing the mapping in Nova also duplicates logic that is already implemented
in Placement.
Use Cases
---------
The use case of the `bandwidth resource provider spec`_ applies here because to
fulfill that use case in a scalable way we need to consider the change proposed
in this spec. Similarly handling VGPUs and accelerator devices requires this
mapping information as well.
Proposed change
===============
Extend the response of the ``GET /allocation_candidates`` API with
an extra field ``mapping`` for each candidate. This field contains a mapping
between resource request group names and RP UUIDs for each candidate to
express which RP provides the resource for which request groups.
Alternatives
------------
For API alternatives about the proposed REST API change see the REST API
section.
Non-scalable Nova based solution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a single compute with the following inventories::
Compute RP (name=compute1, uuid=compute_uuid)
+ CPU = 1
| MEMORY = 1024
| DISK = 10
|
+--+Network agent RP (for SRIOV agent),
+ uuid=sriov_agent_uuid
|
|
+--+Physical network interface RP
| uuid = uuid5(compute1:eth0)
| resources:
| NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=2000
| NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=2000
| traits:
| CUSTOM_PHYSNET_1
| CUSTOM_VNIC_TYPE_DIRECT
|
+--+Physical network interface RP
uuid = uuid5(compute1:eth1)
resources:
NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=2000
NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=2000
traits:
CUSTOM_PHYSNET_1
CUSTOM_VNIC_TYPE_DIRECT
Example 1 - boot with a single port having bandwidth request
............................................................
Neutron port::
{
'id': 'da941911-a70d-4aac-8be0-c3b263e6fd4f',
'resource_request': {
"resources": {
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND": 1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND": 1000},
"required": ["CUSTOM_PHYSNET_1",
"CUSTOM_VNIC_TYPE_DIRECT"]
}
}
Placement request during scheduling::
GET /placement/allocation_candidates?
limit=1000&
resources=DISK_GB=1,MEMORY_MB=512,VCPU=1&
required1=CUSTOM_PHYSNET_1,CUSTOM_VNIC_TYPE_DIRECT&
resources1=NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=1000,
NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=1000
Placement response::
{
"allocation_requests":[
{
"allocations":{
uuid5(compute1:eth0):{
"resources":{
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000
}
},
compute_uuid:{
"resources":{
"MEMORY_MB":512,
"DISK_GB":1,
"VCPU":1
}
}
}
},
// ... another similar allocations with uuid5(compute1:eth1)
],
"provider_summaries":{
// ...
}
}
Filter scheduler selects the first candidate that points to
uuid5(compute1:eth0)
The nova-compute needs to pass RP UUID which provides resource for each port
to Neutron in the port binding. To be able to do that nova (in the `current
implementation`_ the nova-conductor) needs to find the RP in the selected
allocation candidate which provides the resources the Neutron port is
requesting. The `current implementation`_ does this by checking which RP
provides the matching resource classes and resource amounts.
During port binding nova updates the port with that network device RP::
{
"id":"da941911-a70d-4aac-8be0-c3b263e6fd4f",
"resource_request":{
"resources":{
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000
},
"required":[
"CUSTOM_PHYSNET_1",
"CUSTOM_VNIC_TYPE_DIRECT"
]
},
"binding:host_id":"compute1",
"binding:profile":{
"allocation": uuid5(compute1:eth0)
},
}
This scenario is easy as only one port is requesting bandwidth
resources so there will be only one RP in each allocation
candidate that provides such resources.
Example 2 - boot with two ports having bandwidth request
........................................................
Neutron port1::
{
'id': 'da941911-a70d-4aac-8be0-c3b263e6fd4f',
'resource_request': {
"resources": {
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND": 1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND": 1000},
"required": ["CUSTOM_PHYSNET_1",
"CUSTOM_VNIC_TYPE_DIRECT"]
}
}
Neutron port2::
{
'id': '2f2613ce-95a9-490a-b3c4-5f1c28c1f886',
'resource_request': {
"resources": {
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND": 1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND": 2000},
"required": ["CUSTOM_PHYSNET_1",
"CUSTOM_VNIC_TYPE_DIRECT"]
}
}
Placement request during scheduling::
GET /placement/allocation_candidates?
group_policy=isolate&
limit=1000&
resources=DISK_GB=1,MEMORY_MB=512,VCPU=1&
required1=CUSTOM_PHYSNET_1,CUSTOM_VNIC_TYPE_DIRECT&
resources1=NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=1000,
NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=1000&
required2=CUSTOM_PHYSNET_1,CUSTOM_VNIC_TYPE_DIRECT&
resources2=NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=1000,
NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=2000
In the above request the granular request group1 is generated from
port1 and granular request group2 is generated from port2.
Placement response::
{
"allocation_requests":[
{
"allocations":{
uuid5(compute1:eth0):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000
}
},
uuid5(compute1:eth1):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":2000
}
},
compute_uuid:{
"resources":{
"MEMORY_MB":512,
"DISK_GB":1,
"VCPU":1
}
}
}
},
// ... another similar allocation_request where the allocated
// amounts are reversed between uuid5(compute1:eth0) and
// uuid5(compute1:eth1)
],
"provider_summaries":{
// ...
}
}
Filter scheduler selects the first candidate.
Nova needs to find the RP in the selected allocation candidate which
provides the resources for each Neutron port request.
For the selected allocation candidate there are two possible port - RP
mappings but only one valid mapping if we consider the bandwidth
amounts:
* port1 - uuid5(compute1:eth0)
* port2 - uuid5(compute1:eth1)
When Nova tries to map the first port, port1, then both
uuid5(compute1:eth0) and uuid5(compute1:eth1) still have enough
resources in the allocation request to match with the request of port1. So at
that point Nova can map port1 to uuid5(compute1:eth1). However this means
that Nova will not find any viable mapping later for port2 and therefore Nova
has to go back and retry to create the mapping with port1 mapped to the other
alternative. This means that Nova needs to implement a full backtracking
algorithm to find the proper mapping.
Scaling considerations
......................
With 4 RPs and 4 ports, in worst case, we have 4! (24) possible
mappings and each mapping needs 4 steps to be generated (assuming
that in the worst case the mapping of the 4th port is the one that
fails). So this backtracking takes 96 steps. I think this code will
scale pretty badly.
Note that our example uses the group_policy=isolate query param
so the RPs in the allocation candidate cannot overlap. If we set
group_policy=none and therefore allow RP overlapping then the necessary
calculation steps could grow even more.
Note that even if having more than 4 ports for a server is considered
unrealistic, additional granular request groups can appear in the
allocation candidate request from other sources than Neutron, e.g. from flavor
extra_spec due to VGPUs or from Cyborg due to accelerators.
Data model impact
-----------------
None
REST API impact
---------------
Extend the response of the ``GET /allocation_candidates`` API with
an extra field ``mappings`` for each candidate in a new microversion. This
field contains a mapping between resource request group names and RP UUIDs for
each candidate to express which RP provides the resource for which request
groups.
For the request::
GET /placement/allocation_candidates?
resources=DISK_GB=1,MEMORY_MB=512,VCPU=1&
required1=CUSTOM_PHYSNET_1,CUSTOM_VNIC_TYPE_DIRECT&
resources1=NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=1000,
NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=1000&
required2=CUSTOM_PHYSNET_1,CUSTOM_VNIC_TYPE_DIRECT&
resources2=NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND=1000,
NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND=2000
Placement would return the response::
{
"allocation_requests":[
{
"allocations":{
uuid5(compute1:eth0):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000
},
},
uuid5(compute1:eth1):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":2000
},
},
compute_uuid:{
"resources":{
"MEMORY_MB":512,
"DISK_GB":1,
"VCPU":1
},
}
},
"mappings": {
"1": [uuid5(compute1:eth0)],
"2": [uuid5(compute1:eth1)],
"": [compute_uuid],
},
},
{
"allocations":{
uuid5(compute1:eth1):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000
},
},
uuid5(compute1:eth0):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":2000
},
},
compute_uuid:{
"resources":{
"MEMORY_MB":512,
"DISK_GB":1,
"VCPU":1
},
}
},
"mappings": {
"1": [uuid5(compute1:eth1)],
"2": [uuid5(compute1:eth0)],
"": [compute_uuid],
},
},
],
"provider_summaries":{
// unchanged
}
}
The numbered groups are always satisfied by a single RP so the length of the
mapping value will be always 1. However the unnumbered group might be satisfied
by more than one RP so the length of the mapping value there can be bigger
than 1.
This new field will be added to the schema for ``POST /allocations``, ``PUT
/allocations/{consumer_uuid}``, and ``POST /reshaper`` so the client does not
need to strip it from the candidate before posting that back to Placement to
make the allocation. The contents of the field will be ignored by these
operations.
*Alternatively* the mapping can be added as a separate top level key to the
response.
Response::
{
"allocation_requests":[
{
"allocations":{
uuid5(compute1:eth0):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000
},
},
uuid5(compute1:eth1):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":2000
},
},
compute_uuid:{
"resources":{
"MEMORY_MB":512,
"DISK_GB":1,
"VCPU":1
},
}
}
},
{
"allocations":{
uuid5(compute1:eth0):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":2000
},
},
uuid5(compute1:eth1):{
"resources":{
"NET_BANDWIDTH_EGRESS_KILOBITS_PER_SECOND":1000,
"NET_BANDWIDTH_INGRESS_KILOBITS_PER_SECOND":1000
},
},
compute_uuid:{
"resources":{
"MEMORY_MB":512,
"DISK_GB":1,
"VCPU":1
},
}
}
},
],
"provider_summaries":{
// unchanged
}
"resource_provider-request_group-mappings":[
{
"1": [uuid5(compute1:eth0)],
"2": [uuid5(compute1:eth1)],
"": [compute_uuid],
},
{
"1": [uuid5(compute1:eth1)],
"2": [uuid5(compute1:eth0)],
"": [compute_uuid],
}
]
}
This has the advantage that the allocation requests are unchanged and
therefore still can be transparently sent back to placement
to do the allocation.
This has the disadvantage that one mapping in the
``resource_provider-request_group-mappings`` is connected to one candidate
in the allocation_requests list by the list index only.
We decided to go with the primary proposal.
Security impact
---------------
None
Notifications impact
--------------------
None
Other end user impact
---------------------
None
Performance Impact
------------------
None
Other deployer impact
---------------------
None
Developer impact
----------------
None
Upgrade impact
--------------
None
Implementation
==============
Assignee(s)
-----------
Primary assignee:
None
Work Items
----------
* Extend the `placement allocation candidate generation algorithm`_ to return
the mapping that is internally calculated.
* Extend the API with a new microversion to return the mapping to the API
client as well
* Within the same microversion extend the JSON schema for ``POST
/allocations``, ``PUT /allocations/{uuid}``, and ``POST /reshaper`` to accept
(and ignore) the mappings key.
Dependencies
============
None
Testing
=======
New gabbi tests for the new API microversion and unit test to cover the
unhappy path.
Documentation Impact
====================
Placement API ref needs to be updated with the new microversion.
References
==========
.. _`building allocations for each granular request group`: https://github.com/openstack/nova/blob/6522ea3ecfe99cca3fb33258b11e5a1f34e6e8f0/nova/api/openstack/placement/objects/resource_provider.py#L4113
.. _`bandwidth resource provider spec`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/bandwidth-resource-provider.html
.. _`current implementation`: https://github.com/openstack/nova/blob/58a1fcc7851930febdb4c1c7ed49357337151f0c/nova/objects/request_spec.py#L761
.. _`placement allocation candidate generation algorithm`: https://github.com/openstack/placement/blob/57026255615679122e6f305dfa3520c012f57ca7/placement/objects/allocation_candidate.py#L207
.. _`Proposed in nova spec repo`: https://review.opendev.org/#/c/597601
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Stein
- `Proposed in nova spec repo`_ but was not approved
* - Train
- Re-proposed in the placement repo
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/xena/ 0000775 0000000 0000000 00000000000 15132464062 0023004 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/xena/implemented/ 0000775 0000000 0000000 00000000000 15132464062 0025307 5 ustar 00root root 0000000 0000000 allow-provider-re-parenting.rst 0000664 0000000 0000000 00000017542 15132464062 0033332 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/xena/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
========================================
Allow provider re-parenting in placement
========================================
https://storyboard.openstack.org/#!/story/2008764
This spec proposes to allow re-parenting and un-parenting (or orphaning) RPs
via ``PUT /resource_providers/{uuid}`` API in Placement.
Problem description
===================
Today the placement API only allows changing the parent of an RP from None to a
valid RP UUID. However there are use cases where moving an RP between parents
makes
sense.
Use Cases
---------
* An existing PGPU RP needs to be moved under the NUMA RP when NUMA is modeled.
* We have a `neutron bug`_ that introduced an unwanted change causing that
SRIOV PF RPs was created under the root RP instead of under the neutron agent
RP. We can fix the broken logic in neutron but we cannot fix the already
wrongly parented RP in the DB via the placement API.
.. _`neutron bug`: https://bugs.launchpad.net/neutron/+bug/1921150
Proposed change
===============
Re-parenting is rejected today and the code has the following `comment`_ :
TODO(jaypipes): For now, "re-parenting" and "un-parenting" are
not possible. If the provider already had a parent, we don't
allow changing that parent due to various issues, including:
* if the new parent is a descendant of this resource provider, we
introduce the possibility of a loop in the graph, which would
be very bad
* potentially orphaning heretofore-descendants
So, for now, let's just prevent re-parenting...
.. _`comment`: https://github.com/openstack/placement/blob/6f00ba5f685183539d0ebf62a4741f2f6930e051/placement/objects/resource_provider.py#L777
The first reason is moot as the loop check is already needed and implemented
for the case when the parent is updated from None to an RP.
The second reason does not make sense to me. By moving an RP under another RP
all the descendants should be moved as well, similarly to how the None -> UUID
case works today. So I don't see how can we orphan any RP by re-parenting.
I see the following possible cases of move:
* RP moved upwards, downwards, side-wards in the same RP tree
* RP moved to a different tree
* RP moved to top level, becoming a new root RP
From placement perspective every case results in one or more valid RP trees.
Based on the data model if there was allocations against the moved RP those
allocations will still refer to the RP after the move. This means that if a
consumer has allocation against a single RP tree before the move might have
allocation against multiple trees after the RP move. Such consumer is already
supported today.
An RP move might invalidate the original intention of the consumer. If the
consumer used an allocation candidate query to select and allocate resources
then by such query the consumer defined a set of rules (e.g. in_tree,
same_subtree) the allocation needs to fulfill. The rules might not be valid
after an RP is moved. However placement never promised to keep such invariant
as that would require the storage of the rules and correlating allocation
candidate queries and allocations. Moreover such issue can already
be created with the POST /reshape API as well. Therefore keeping any such
invariant is the responsibility of the client. So I propose to start supporting
all forms of RP re-parenting in a new placement API microversion.
Alternatives
------------
See the API alternatives below.
Data model impact
-----------------
None
REST API impact
---------------
In a new microversion allow changing the parent_uuid of a resource provider to
None or to any valid RP uuid that does not cause a loop in any of the trees via
the ``PUT /resource_providers/{uuid}`` API.
Protecting against unwanted changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As noted above re-parenting can significantly change the RP model in the
Placement database. So such action needs to be done carefully. While the
Placement API is already admin only by default, the request is raised on the
Xena PTG for extra safety measures against unintentional parent changes.
During the spec discussion every reviewer expressed the view that such
safety measure is not really needed. So this spec only proposes to use the new
microversion and extensive documentation to signal the new behavior.
Still there is the list of alternatives discussed during the review:
* *Do nothing*: While it is considered not safe enough during the PTG, during
the spec review we ended up choosing this as the main solution.
* *A new query parameter*: A new query parameter is proposed for the
``PUT /resource_providers/{uuid}`` API called ``allow_reparenting`` the
default value of the query parameter is ``False`` and the re-parenting cases
defined in this spec is only accepted by Placement if the request contains
the new query parameter with the ``True``. It is considered hacky to add a
query parameter for a PUT request.
* *A new field in the request body*: This new field would have the same meaning
as the proposed query parameter but it would be put into the request body. It
is considered non-RESTful as such field is not persisted or returned as the
result of the PUT request as it does not belong to the representation of the
ResourceProvider entity the PUT request updates.
* *A new Header*: Instead of a new query parameter use a new HTTP header
``x-openstack-placement-allow-provider-reparenting:True``. As the name shows
this needs a lot more context encoded in it to be specific for the API it
modifies, while the query parameter is already totally API specific.
* *Use a PATCH request for updating the parent*: While this would make the
parent change more explicit it would also cause great confusion for the
client for multiple reasons:
1) Other fields of the same resource provider entity can be updated via the
PUT request, but not the ``parent_uuid`` field.
2) Changing the ``parent_uuid`` field from None to a valid RP uuid is
supported by the PUT request but to change it from one RP uuid to another
would require a totally different ``PATCH`` request.
* *Use a sub resource*: Signal the explicit re-parenting either in a form of
``PUT /resource-providers/{uuid}/force`` or
``PUT /resource-providers/{uuid}/parent_uuid/{parent}``. While the second
option seems to be acceptable to multiple reviewers, I think it will be
confusing similarly to ``PATCH``. It would create another way to update a
field of an entity while other fields still updated directly on the parent
resource.
Security impact
---------------
None
Notifications impact
--------------------
N/A
Other end user impact
---------------------
None
Performance Impact
------------------
The loop detection and the possible update of all the RPs in the changed
subtree with a new ``root_provider_id`` needs extra processing. However the
re-parenting operation is considered very infrequent. So the overall Placement
performance is not affected.
Other deployer impact
---------------------
None
Developer impact
----------------
None
Upgrade impact
--------------
None
Implementation
==============
Assignee(s)
-----------
Primary assignee:
balazs-gibizer
Feature Liaison
---------------
Feature liaison:
None
Work Items
----------
* Add a new microversion to the Placement API. Implement an extended loop
detection and update ``root_provider_id`` of the subtree if needed.
* Mark the new microversion osc-placement as supported.
Dependencies
============
None
Testing
=======
* Unit testing
* Gabbit API testing
Documentation Impact
====================
* API doc needs to be updated. Warn the user that this is a potentially
dangerous operation.
References
==========
None
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Xena
- Introduced
support-consumer-types.rst 0000664 0000000 0000000 00000024564 15132464062 0032504 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/xena/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
======================
Support Consumer Types
======================
https://storyboard.openstack.org/#!/story/2005473
This spec aims at providing support for services to model ``consumer types``
in placement. While placement defines a consumer to be an entity consuming
resources from a provider it does not provide a way to identify similar
"types" of consumers and henceforth allow services to group/query them based
on their types. This spec proposes to associate each consumer to a particular
type defined by the service owning the consumer.
Problem description
===================
In today's placement world each allocation posted by a service is against a
provider for a consumer (ex: for an instance or a migration). However a
service may want to distinguish amongst the allocations made against its
various types of consumers (ex: nova may want to fetch allocations against
instances alone). This is currently not possible in placement and hence the
goal is to make placement aware of "types of consumers" for the services.
Use Cases
---------
* Nova using placement as its `quota calculation system`_: Currently this
approach uses the nova_api database to calculate the quota on the "number of
instances". In order for nova to be able to use placement to count the number
of "instance-consumers", there needs to be a way by which we can
differentiate "instance-consumers" from "migration-consumers".
* Ironic wanting to differentiate between "standalone-consumer" versus
"nova-consumer".
Note that it is not within the scope of placement to model the coordination of
the consumer type collisions that may arise between multiple services during
their definition. Placement will also not be able to identify or verify correct
consumer types (e.g., INTANCE versus INSTANCE) from the external service's
perspective.
Proposed change
===============
In order to model consumer types in placement, we will add a new
``consumer_types`` table to the placement database which will have two columns:
#. an ``id`` which will be of type integer.
#. a ``name`` which will be of type varchar (maximum of 255 characters) and
this will have a unique constraint on it. The pattern restrictions for the
name will be similar to placement traits and resource class names, i.e.
restricted to only ``^[A-Z0-9_]+$`` with length restrictions being {1, 255}.
A sample look of such a table would be:
+--------+----------+
| id | name |
+========+==========+
| 1 | INSTANCE |
+--------+----------+
| 2 | MIGRATION|
+--------+----------+
A new column called ``consumer_type_id`` would be added to the ``consumers``
table to map the consumer to its type.
The ``POST /allocations`` and ``PUT /allocations/{consumer_uuid}`` REST API's
will gain a new (required) key called ``consumer_type`` which is of type string
in their request bodies through which the caller can specify what type of
consumer it is creating or updating the allocations for. If the specified
``consumer_type`` key is not present in the ``consumer_types`` table, a new
entry will be created. Also note that once a consumer type is created, it
lives on forever. If this becomes a problem in the future for the operators
a tool can be provided to clean them up.
In order to maintain parity between the request format of
``PUT /allocations/{consumer_uuid}`` and response format of
``GET /allocations/{consumer_uuid}``, the ``consumer_type`` key will also be
exposed through the response of ``GET /allocations/{consumer_uuid}`` request.
The external services will be able to leverage this ``consumer_type`` key
through the ``GET /usages`` REST API which will have a change in the format
of its request and response. The request will gain a new optional key called
``consumer_type`` which will enable users to query usages based on the consumer
type. The response will group the resource usages by the specified
consumer_type (if the ``consumer_type`` key is not specified, it will return
the usages for all consumer types), meaning it will gain a new
``consumer_type`` key.
Per consumer type we will also return a ``consumer_count`` of consumers of that
type.
See the `REST API impact`_ section for more details on how this would be done.
The above REST API changes and the corresponding changes to the ``/reshaper``
REST API will be available from a new microversion.
The existing consumers in placement will have a ``NULL`` value in their
consumer_type_id field, which means we do not know what type these consumers
are, and the service to which the consumers belong needs to update this
information if it wants to make use of the ``consumer_types`` feature.
Alternatives
------------
We could create a new REST API to allow users to create consumer types
explicitly but it does not make sense to add a new API for a non-user facing
feature.
Data model impact
-----------------
The placement database will get a new ``consumer_types`` table and the
``consumers`` table will get a new ``consumer_type_id`` column that by default
will be ``NULL``.
REST API impact
---------------
The new ``POST /allocations`` request will look like this::
{
"30328d13-e299-4a93-a102-61e4ccabe474": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"consumer_type": "INSTANCE", # This is new
"allocations": {
"e10927c4-8bc9-465d-ac60-d2f79f7e4a00": {
"resources": {
"VCPU": 2,
"MEMORY_MB": 3
},
"generation": 4
}
}
},
"71921e4e-1629-4c5b-bf8d-338d915d2ef3": {
"consumer_generation": 1,
"project_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"user_id": "131d4efb-abc0-4872-9b92-8c8b9dc4320f",
"consumer_type": "MIGRATION", # This is new
"allocations": {}
}
}
The new ``PUT /allocations/{consumer_uuid}`` request will look like this::
{
"allocations": {
"4e061c03-611e-4caa-bf26-999dcff4284e": {
"resources": {
"DISK_GB": 20
}
},
"89873422-1373-46e5-b467-f0c5e6acf08f": {
"resources": {
"MEMORY_MB": 1024,
"VCPU": 1
}
}
},
"consumer_generation": 1,
"user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70",
"project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784",
"consumer_type": "INSTANCE" # This is new
}
Note that ``consumer_type`` is a required key for both these requests at
this microversion.
The new ``GET /usages`` response will look like this for a request of type
``GET /usages?project_id=<project id>&user_id=<user id>`` or
``GET /usages?project_id=<project id>`` where the ``consumer_type`` key is not
specified::
{
"usages": {
"INSTANCE": {
"consumer_count": 5,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
"MIGRATION": {
"consumer_count": 2,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
"unknown": {
"consumer_count": 1,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
The new ``GET /usages`` response will look like this for a request of type
``GET /usages?project_id=<project id>&user_id=<user id>&consumer_type="INSTANCE"``
or ``GET /usages?project_id=<project id>&consumer_type="INSTANCE"`` where the
``consumer_type`` key is specified::
{
"usages": {
"INSTANCE": {
"consumer_count": 5,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
A special request of the form
``GET /usages?project_id=<project id>&consumer_type=all`` will be allowed to
enable users to be able to query for the total count of all the consumers. The
response for such a request will look like this::
{
"usages": {
"all": {
"consumer_count": 3,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
A special request of the form
``GET /usages?project_id=<project id>&consumer_type=unknown`` will be allowed
to enable users to be able to query for the total count of the consumers that
have no consumer type assigned. The response for such a request will look like
this::
{
"usages": {
"unknown": {
"consumer_count": 3,
"DISK_GB": 5,
"MEMORY_MB": 512,
"VCPU": 2
}
}
}
Note that ``consumer_type`` is an optional key for the ``GET /usages`` request.
The above REST API changes and the corresponding changes to the ``/reshaper``
REST API will be available from a new microversion.
Security impact
---------------
None.
Notifications impact
--------------------
N/A
Other end user impact
---------------------
The external services using this feature like nova should take the
responsibility of updating the consumer type of existing consumers
from ``NULL`` to the actual type through the
``PUT /allocations/{consumer_uuid}`` REST API.
Performance Impact
------------------
None.
Other deployer impact
---------------------
None.
Developer impact
----------------
None.
Upgrade impact
--------------
The ``placement-manage db sync`` command has to be run by the operators in
order to upgrade the database schema to accommodate the new changes.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
Other contributors:
Work Items
----------
* Add the new ``consumer_types`` table and create a new ``consumer_type_id``
column in the ``consumers`` table with a foreign key constraint to the ``id``
column of the ``consumer_types`` table.
* Make the REST API changes in a new microversion for:
* ``POST /allocations``,
* ``PUT /allocations/{consumer_uuid}``,
* ``GET /allocations/{consumer_uuid}``,
* ``GET /usages`` and
* ``/reshaper``
Dependencies
============
None.
Testing
=======
Unit and functional tests to validate the feature will be added.
Documentation Impact
====================
The placement API reference will be updated to reflect the new changes.
References
==========
.. _quota calculation system: https://review.opendev.org/#/q/topic:bp/count-quota-usage-from-placement
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Train
- Introduced
* - Xena
- Reproposed
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/yoga/ 0000775 0000000 0000000 00000000000 15132464062 0023010 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/yoga/implemented/ 0000775 0000000 0000000 00000000000 15132464062 0025313 5 ustar 00root root 0000000 0000000 2005345-placement-mixing-required-traits-with-any-traits.rst 0000664 0000000 0000000 00000012547 15132464062 0040244 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/yoga/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
==============================================
Support mixing required traits with any traits
==============================================
https://storyboard.openstack.org/#!/story/2005345
The `any-traits-in-allocation-candidates-query`_ spec proposed to allow
querying traits in the form of ``required=in:TRAIT1,TRAIT2``. This spec goes
one step further and proposes to allow repeating the ``required`` query
parameter to support mixing both ``required=TRAIT1,TRAIT2,!TRAIT3`` and
``required=in:TRAIT1,TRAIT2`` format in a single query. This is needed for
Neutron to be able to express that a port needs a resource provider having
a specific ``vnic_type`` trait but also having one of the physnet traits the
port's network maps to.
For example::
GET /allocation_candidates?required1=CUSTOM_VNIC_TYPE_DIRECT&
required1=in:CUSTOM_PHYSNET_FOO,CUSTOM_PHYSNET_BAR
...
requests a networking device RP in the candidates that supports the ``direct``
``vnic_type`` and is connected either to ``physnet_foo`` or ``physnet_bar`` or
both.
Problem description
===================
Neutron through Nova needs to be able to query Placement for allocation
candidates that are matching to *at least one* trait from the list of traits as
well as matching another specific trait in a single query.
Use Cases
---------
Neutron wants to use this any(traits) query to express that a port's bandwidth
resource request needs to be fulfilled by a Network device RP that is connected
to one of the physnets the network of the given port is connected to. With
Neutron's multiprovider network extension a single Neutron network can consist
of multiple network segments connected to different physnets. But at the same
time Neutron wants to express that the same RP has a specific vnic_type trait
as well.
Proposed change
===============
Extend the ``GET /allocation_candidates`` and ``GET /resource_providers``
requests to allow repeating the ``required`` query parameter
to support both the ``required=TRAIT1,TRAIT2,!TRAIT3`` and
``required=in:TRAIT1,TRAIT2`` syntax in a single query.
Alternatives
------------
None
Data model impact
-----------------
None
REST API impact
---------------
In a new microversion the ``GET /allocation_candidates`` and the
``GET /resource_providers`` query should allow repeating the ``required``
query parameter more than once while supporting both normal and any trait
syntax in the same query.
The ``GET /allocation_candidates`` query having
``required=CUSTOM_VNIC_TYPE_NORMAL&
required=in:CUSTOM_PHYSNET1,CUSTOM_PHYSNET2`` parameters should result in
allocation candidates where each allocation candidate has the traits
``CUSTOM_VNIC_TYPE_NORMAL`` and either ``CUSTOM_PHYSNET1`` or
``CUSTOM_PHYSNET2`` (or both).
The ``GET /resource_providers`` query having
``required=CUSTOM_VNIC_TYPE_NORMAL&
required=in:CUSTOM_PHYSNET1,CUSTOM_PHYSNET2`` parameters should result in
resource providers where each resource provider has the traits
``CUSTOM_VNIC_TYPE_NORMAL`` and either ``CUSTOM_PHYSNET1`` or
``CUSTOM_PHYSNET2`` (or both).
The response body of the ``GET /allocation_candidates`` and
``GET /resource_providers`` query are unchanged.
Note the following two queries express exactly the same requirements::
?required=in:A,B,C
&required=X
&required=Y
&required=Z
?required=in:A,B,C
&required=X,Y,Z
.. note::
To ease the implementation we might decide to implement this API change in
the same microversion as `any-traits-in-allocation-candidates-query`_
implemented in.
Security impact
---------------
None
Notifications impact
--------------------
None
Other end user impact
---------------------
The osc-placement client plugin needs to be updated to support the new
Placement API microversion. This means that the CLI should support providing
the ``--required`` parameter more than once, supporting both the normal and the
any-trait syntax.
Performance Impact
------------------
None
Other deployer impact
---------------------
None
Developer impact
----------------
None
Upgrade impact
--------------
None
Implementation
==============
Assignee(s)
-----------
Primary assignee:
balazs-gibizer
Work Items
----------
* Extend the resource provider and allocation candidate DB query to support
more than one set of required traits
* Extend the Placement REST API with a new microversion that supports repeating
the ``required`` query param
* Extend the osc-placement client plugin to support the new microversion
Dependencies
============
* The `any-traits-in-allocation-candidates-query`_ spec
.. _`any-traits-in-allocation-candidates-query`: https://review.openstack.org/649992
Testing
=======
Both new gabbi and functional tests need to be written for the Placement API
change. Also the osc-placement client plugin will need additional functional
test coverage.
Documentation Impact
====================
The Placement API reference needs to be updated.
References
==========
None
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Rocky
- Introduced
* - Stein
- Reproposed, approved but not implemented
* - Train
- Reproposed but not approved due to lack of focus
* - Yoga
- Reproposed
2005346-any-traits-in-allocation_candidates-query.rst 0000664 0000000 0000000 00000013562 15132464062 0036762 0 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/yoga/implemented ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
=================================================
Support any traits in allocation_candidates query
=================================================
https://storyboard.openstack.org/#!/story/2005346
The ``GET /allocation_candidates`` request in Placement supports the
``required`` query parameter. If the caller specifies a list of traits in the
``required`` parameter then placement will limit the returned allocation
candidates to those RP trees that fulfill *every* traits in that list. To
support minimum bandwidth guarantees in Neutron + Nova we need to be able to
query allocation candidates that fulfill *at least one* trait from a list of
traits specified in the query. This is required for the case when a Neutron
network maps to more than one physnets but the port's bandwidth request can be
fulfilled from any physnet the port's network maps to.
Problem description
===================
Neutron through Nova needs to be able to query Placement for allocation
candidates that are matching to *at least one* trait from the list of traits
provided in the query.
Use Cases
---------
Neutron wants to use this any(traits) query to express that a port's bandwidth
resource request needs to be fulfilled by a Network device RP that is connected
to one of the physnets the network of the given port is connected to. With
Neutron's multiprovider network extension a single Neutron network can consist
of multiple network segments connected to different physnets.
Proposed change
===============
Extend the ``GET /allocation_candidates`` and ``GET /resource_providers``
requests with a new ``required=in:TRAIT1,TRAIT2`` query parameter syntax and
change the placement implementation to support this new syntax.
The `granular-resource-requests`_ spec proposes support for multiple request
groups in the Placement query identified by a positive integer postfix in the
``required`` query param. The new ``in:TRAIT1,TRAIT2`` syntax is applicable to
the ``required`` query params as well.
.. _`granular-resource-requests`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/granular-resource-requests.html
Alternatives
------------
During the train review Sean suggested to use ``any``, ``all``, ``none``
instead of using the currently proposed ``in:`` syntax. However to keep the API
consistent we decided to continue using ``in:`` for traits as it is already
used for aggregates. Still we think that ``any``, ``all``, ``none`` would be a
better syntax but that requires a separate effort changing the existing query
syntax as well.
Data model impact
-----------------
None
REST API impact
---------------
Today the ``GET /allocation_candidates`` and ``GET /resource_providers`` query
support the ``required`` query param in the form of
``required=TRAIT1,TRAIT2,!TRAIT3``. This spec proposes to implement a new
microversion to allow the format of ``required=in:TRAIT1,TRAIT2`` as well
as the old format.
Each resource provider returned from a request having
``required=in:TRAIT1,TRAIT2`` should have *at least* one matching trait from
TRAIT1 and TRAIT2.
``required=in:TRAIT1,TRAIT2`` used in a ``GET /allocation_candidates`` query
means that the union of all the traits across all the providers in every
allocation candidate must contain at least one of T1, T2.
``requiredX=in:TRAIT1,TRAIT2`` used in a ``GET /allocation_candidates`` query
means that the resource provider that satisfies the requirement of the granular
request group ``X`` must also have at least one of T1, T2.
The response body of the ``GET /allocation_candidates`` and
``GET /resource_providers`` query are unchanged.
A separate subsequent spec will propose to support repeating the ``required``
query param more than once to allow mixing the two formats.
Note that mixing required and forbidden trait requirements in the same
``required=in:`` query param, like ``required=in:TRAIT1,!TRAIT2`` will not be
supported and will result in an HTTP 400 response.
Security impact
---------------
None
Notifications impact
--------------------
None
Other end user impact
---------------------
The osc-placement client plugin needs to be updated to support the new
Placement API microversion. That plugin currently support the --required CLI
parameter accepting a list of traits. So this patch propose to extend that
parameter to accept in:TRAIT1,TRAIT2 format.
Performance Impact
------------------
None
Other deployer impact
---------------------
None
Developer impact
----------------
None
Upgrade impact
--------------
None
Implementation
==============
Assignee(s)
-----------
Primary assignee:
balazs-gibizer
Work Items
----------
* Extend the resource provider and allocation candidate DB query to support the
new type of query
* Extend the Placement REST API with a new microversion that supports the any
trait syntax
* Extend the osc-placement client plugin to support the new microversion
Dependencies
============
* the osc-placement client plugin can only be extended with the new
microversion support if every older microversion is already supported which
is not the case today.
Testing
=======
Both new gabbi and functional tests need to be written for the Placement API
change. Also the osc-placement client plugin will need additional functional
test coverage.
Documentation Impact
====================
The Placement API reference needs to be updated.
References
==========
* osc-placement `review`_ series adding support for latest Placement
microversions
.. _`review`: https://review.openstack.org/#/c/548326
History
=======
.. list-table:: Revisions
:header-rows: 1
* - Release Name
- Description
* - Rocky
- Introduced
* - Stein
- Reproposed, approved but not implemented
* - Train
- Reproposed but not approved due to lack of focus
* - Yoga
- Reproposed
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/zed/ 0000775 0000000 0000000 00000000000 15132464062 0022633 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/zed/approved/ 0000775 0000000 0000000 00000000000 15132464062 0024453 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/specs/zed/approved/template.rst 0000777 0000000 0000000 00000000000 15132464062 0032006 2../../template.rst ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/user/ 0000775 0000000 0000000 00000000000 15132464062 0021712 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/source/user/index.rst 0000664 0000000 0000000 00000011126 15132464062 0023554 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
=================
Placement Usage
=================
Tracking Resources
==================
The placement service enables other projects to track their own resources.
Those projects can register/delete their own resources to/from placement
via the placement `HTTP API`_.
The placement service originated in the :nova-doc:`Nova project <>`. As a
result much of the functionality in placement was driven by nova's
requirements. However, that functionality was designed to be sufficiently
generic to be used by any service that needs to manage the selection and
consumption of resources.
How Nova Uses Placement
-----------------------
Two processes, ``nova-compute`` and ``nova-scheduler``, host most of nova's
interaction with placement.
The nova resource tracker in ``nova-compute`` is responsible for `creating the
resource provider`_ record corresponding to the compute host on which the
resource tracker runs, `setting the inventory`_ that describes the quantitative
resources that are available for workloads to consume (e.g., ``VCPU``), and
`setting the traits`_ that describe qualitative aspects of the resources (e.g.,
``STORAGE_DISK_SSD``).
If other projects -- for example, Neutron or Cyborg -- wish to manage resources
on a compute host, they should create resource providers as children of the
compute host provider and register their own managed resources as inventory on
those child providers. For more information, see the
:doc:`Modeling with Provider Trees <provider-tree>` document.
The ``nova-scheduler`` is responsible for selecting a set of suitable
destination hosts for a workload. It begins by formulating a request to
placement for a list of `allocation candidates`_. That request expresses
quantitative and qualitative requirements, membership in aggregates, and in
more complex cases, the topology of related resources. That list is reduced and
ordered by filters and weighers within the scheduler process. An `allocation`_
is made against a resource provider representing a destination, consuming a
portion of the inventory set by the resource tracker.
.. toctree::
:hidden:
provider-tree
.. _HTTP API: https://docs.openstack.org/api-ref/placement/
.. _creating the resource provider: https://docs.openstack.org/api-ref/placement/?expanded=create-resource-provider-detail#create-resource-provider
.. _setting the inventory: https://docs.openstack.org/api-ref/placement/?expanded=update-resource-provider-inventories-detail#update-resource-provider-inventories
.. _setting the traits: https://docs.openstack.org/api-ref/placement/?expanded=update-resource-provider-traits-detail#update-resource-provider-traits
.. _allocation candidates: https://docs.openstack.org/api-ref/placement/?expanded=list-allocation-candidates-detail#list-allocation-candidates
.. _allocation: https://docs.openstack.org/api-ref/placement/?expanded=update-allocations-detail#update-allocations
REST API
========
The placement API service provides a well-documented, JSON-based `HTTP API`_
and data model. It is designed to be easy to use from whatever HTTP client is
suitable. There is a plugin to the openstackclient_ command line tool called
osc-placement_ which is useful for occasional inspection and manipulation of
the resources in the placement service.
.. _HTTP API: https://docs.openstack.org/api-ref/placement/
.. _openstackclient: https://pypi.org/project/openstackclient/
.. _osc-placement: https://pypi.org/project/osc-placement/
Microversions
-------------
The placement API uses microversions for making incremental changes to the
API which client requests must opt into.
It is especially important to keep in mind that nova-compute is a client of
the placement REST API and based on how Nova supports rolling upgrades the
nova-compute service could be Newton level code making requests to an Ocata
placement API, and vice-versa, an Ocata compute service in a cells v2 cell
could be making requests to a Newton placement API.
This history of placement microversions may be found in the following
subsection.
.. toctree::
:maxdepth: 2
../placement-api-microversion-history
placement-14.0.0+git20260116.35.cd24dcb5/doc/source/user/provider-tree.rst 0000664 0000000 0000000 00000072645 15132464062 0025251 0 ustar 00root root 0000000 0000000 ..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
==============================
Modeling with Provider Trees
==============================
Overview
========
Placement supports modeling a hierarchical relationship between different
resource providers. While a parent provider can have multiple child providers,
a child provider can belong to only one parent provider. Therefore, the whole
architecture can be considered as a "tree" structure, and the resource provider
on top of the "tree" is called a "root provider". (See the
`Nested Resource Providers`_ spec for details.)
Modeling the relationship is done by specifying a parent provider via the
`POST /resource_providers`_ operation when creating a resource provider.
.. note:: If the parent provider hasn't been set, you can also parent a
resource provider after the creation via the
`PUT /resource_providers/{uuid}`_ operation. But re-parenting a
resource provider is not supported.
The resource providers in a tree -- and sharing providers as described in the
next section -- can be returned in a single allocation request in the response
of the `GET /allocation_candidates`_ operation. This means that the placement
service looks up a resource provider tree in which resource providers can
*collectively* contain all of the requested resources.
This document describes some case studies to explain how sharing providers,
aggregates, and traits work if provider trees are involved in the
`GET /allocation_candidates`_ operation.
Sharing Resource Providers
==========================
Resources on sharing resource providers can be shared by multiple resource
provider trees. This means that a sharing provider can be in one allocation
request with resource providers from a different tree in the response of the
`GET /allocation_candidates`_ operation. As an example, this may be used for
shared storage that is connected to multiple compute hosts.
.. note:: Technically, a resource provider with the
``MISC_SHARES_VIA_AGGREGATE`` trait becomes a sharing resource
provider and the resources on it are shared by other resource
providers in the same aggregate.
For example, let's say we have the following environment::
+-------------------------------+ +-------------------------------+
| Sharing Storage (SS1) | | Sharing Storage (SS2) |
| resources: | | resources: |
| DISK_GB: 1000 | | DISK_GB: 1000 |
| aggregate: [aggA] | | aggregate: [] |
| trait: | | trait: |
| [MISC_SHARES_VIA_AGGREGATE] | | [MISC_SHARES_VIA_AGGREGATE] |
+---------------+---------------+ +-------------------------------+
| Shared via aggA
+-----------+-----------+ +-----------------------+
| Compute Node (CN1) | | Compute Node (CN2) |
| resources: | | resources: |
| VCPU: 8 | | VCPU: 8 |
| MEMORY_MB: 1024 | | MEMORY_MB: 1024 |
| DISK_GB: 1000 | | DISK_GB: 1000 |
| aggregate: [aggA] | | aggregate: [] |
| trait: [] | | trait: [] |
+-----------------------+ +-----------------------+
Assuming no allocations have yet been made against any of the resource
providers, the request::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500
would return three combinations as the allocation candidates.
1. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``)
2. ``CN2`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``)
3. ``CN1`` (``VCPU``, ``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
``SS2`` is also a sharing provider, but not in the allocation candidates because
it can't satisfy the resource itself and it isn't in any aggregate, so it is
not shared by any resource providers.
When a provider tree structure is present, sharing providers are shared by
the whole tree if one of the resource providers from the tree is connected to
the sharing provider via an aggregate.
For example, let's say we have the following environment where NUMA resource
providers are child providers of the compute host resource providers::
+------------------------------+
| Sharing Storage (SS1) |
| resources: |
| DISK_GB: 1000 |
| agg: [aggA] |
| trait: |
| [MISC_SHARES_VIA_AGGREGATE]|
+--------------+---------------+
| aggA
+--------------------------------+ | +--------------------------------+
| +--------------------------+ | | | +--------------------------+ |
| | Compute Node (CN1) | | | | | Compute Node (CN2) | |
| | resources: +-----+-----+ resources: | |
| | MEMORY_MB: 1024 | | | | MEMORY_MB: 1024 | |
| | DISK_GB: 1000 | | | | DISK_GB: 1000 | |
| | agg: [aggA, aggB] | | | | agg: [aggA] | |
| +-----+-------------+------+ | | +-----+-------------+------+ |
| | nested | nested | | | nested | nested |
| +-----+------+ +----+------+ | | +-----+------+ +----+------+ |
| | NUMA1_1 | | NUMA1_2 | | | | NUMA2_1 | | NUMA2_2 | |
| | VCPU: 8 | | VCPU: 8 | | | | VCPU: 8 | | VCPU: 8 | |
| | agg:[] | | agg:[] | | | | agg:[aggB]| | agg:[] | |
| +------------+ +-----------+ | | +------------+ +-----------+ |
+--------------------------------+ +--------------------------------+
Assuming no allocations have yet been made against any of the resource
providers, the request::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500
would return eight combinations as the allocation candidates.
1. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``MEMORY_MB``, ``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``MEMORY_MB``, ``DISK_GB``)
3. ``NUMA2_1`` (``VCPU``) + ``CN2`` (``MEMORY_MB``, ``DISK_GB``)
4. ``NUMA2_2`` (``VCPU``) + ``CN2`` (``MEMORY_MB``, ``DISK_GB``)
5. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
6. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
7. ``NUMA2_1`` (``VCPU``) + ``CN2`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
8. ``NUMA2_2`` (``VCPU``) + ``CN2`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
Note that ``NUMA1_1`` and ``SS1``, for example, are not in the same aggregate,
but they can be in one allocation request since the tree of ``CN1`` is
connected to ``SS1`` via aggregate A on ``CN1``.
Filtering Aggregates
====================
The difference between ``CN1`` and ``CN2`` in the example above emerges when you
specify the aggregate explicitly in the `GET /allocation_candidates`_ operation
with the ``member_of`` query parameter. The ``member_of`` query parameter
accepts aggregate uuids and filters candidates to the resource providers in the
given aggregate. See the `Filtering by Aggregate Membership`_ spec for details.
Note that the `GET /allocation_candidates`_ operation assumes that "an
aggregate on a root provider spans the whole tree, while an aggregate on a
non-root provider does NOT span the whole tree."
For example, in the environment above, the request::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500&member_of=<aggA uuid>
would return eight candidates,
1. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``MEMORY_MB``, ``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``MEMORY_MB``, ``DISK_GB``)
3. ``NUMA2_1`` (``VCPU``) + ``CN2`` (``MEMORY_MB``, ``DISK_GB``)
4. ``NUMA2_2`` (``VCPU``) + ``CN2`` (``MEMORY_MB``, ``DISK_GB``)
5. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
6. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
7. ``NUMA2_1`` (``VCPU``) + ``CN2`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
8. ``NUMA2_2`` (``VCPU``) + ``CN2`` (``MEMORY_MB``) + ``SS1`` (``DISK_GB``)
This is because aggregate A is on the root providers, ``CN1`` and ``CN2``, so
the API assumes the child providers ``NUMA1_1``, ``NUMA1_2``, ``NUMA2_1`` and
``NUMA2_2`` are also in the aggregate A.
Specifying aggregate B::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500&member_of=<aggB uuid>
would return two candidates.
1. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``MEMORY_MB``, ``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``MEMORY_MB``, ``DISK_GB``)
This is because ``SS1`` is not in aggregate B, and because aggregate B on
``NUMA2_1`` doesn't span the whole tree since the ``NUMA2_1`` resource
provider isn't a root resource provider.
Filtering by Traits
===================
Traits are not only used to indicate sharing providers. They are used to denote
capabilities of resource providers. (See `The Traits API`_ spec for details.)
Traits can be requested explicitly in the `GET /allocation_candidates`_
operation with the ``required`` query parameter, but traits on resource
providers never span other resource providers. If a trait is requested, one of
the resource providers that appears in the allocation candidate should have
the trait regardless of sharing or nested providers. See the `Request Traits`_
spec for details. The ``required`` query parameter also supports negative
expression, via the ``!`` prefix, for forbidden traits. If a forbidden trait
is specified, none of the resource providers that appear in the allocation
candidate may have that trait. See the `Forbidden Traits`_ spec for details.
The ``required`` parameter also supports the syntax ``in:T1,T2,...`` which
means we are looking for resource providers that have either T1 or T2 traits on
them. The two trait query syntax can be combined by repeating the ``required``
query parameter. So querying providers having (T1 or T2) and T3 and not T4 can
be expressed with ``required=in:T1,T2&required=T3,!T4``.
For example, let's say we have the following environment::
+----------------------------------------------------+
| +----------------------------------------------+ |
| | Compute Node (CN1) | |
| | resources: | |
| | VCPU: 8, MEMORY_MB: 1024, DISK_GB: 1000 | |
| | trait: [] | |
| +----------+------------------------+----------+ |
| | nested | nested |
| +----------+-----------+ +----------+----------+ |
| | NIC1_1 | | NIC1_2 | |
| | resources: | | resources: | |
| | SRIOV_NET_VF:8 | | SRIOV_NET_VF:8 | |
| | trait: | | trait: | |
| | [HW_NIC_ACCEL_SSL]| | [] | |
| +----------------------+ +---------------------+ |
+----------------------------------------------------+
Assuming no allocations have yet been made against any of the resource
providers, the request::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500,SRIOV_NET_VF:2
&required=HW_NIC_ACCEL_SSL
would return only ``NIC1_1`` for ``SRIOV_NET_VF``. As a result, we get one
candidate.
1. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_1`` (``SRIOV_NET_VF``)
In contrast, for forbidden traits::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500,SRIOV_NET_VF:2
&required=!HW_NIC_ACCEL_SSL
would exclude ``NIC1_1`` for ``SRIOV_NET_VF``.
1. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_2`` (``SRIOV_NET_VF``)
If the trait is not in the ``required`` parameter, that trait will simply be
ignored in the `GET /allocation_candidates`_ operation.
For example::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500,SRIOV_NET_VF:2
would return two candidates.
1. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_1`` (``SRIOV_NET_VF``)
2. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_2`` (``SRIOV_NET_VF``)
Granular Resource Requests
==========================
If you want to get the same kind of resources from multiple resource providers
at once, or if you require a provider of a particular requested resource
class to have a specific trait or aggregate membership, you can use the
`Granular Resource Request`_ feature.
This feature is enabled by numbering the ``resources``, ``member_of`` and
``required`` query parameters respectively.
For example, in the environment above, the request::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500
&resources1=SRIOV_NET_VF:1&required1=HW_NIC_ACCEL_SSL
&resources2=SRIOV_NET_VF:1
&group_policy=isolate
would return one candidate where two providers serve ``SRIOV_NET_VF`` resource.
1. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_1`` (``SRIOV_NET_VF:1``) + ``NIC1_2`` (``SRIOV_NET_VF:1``)
The ``group_policy=isolate`` ensures that the one resource is from a provider
with the ``HW_NIC_ACCEL_SSL`` trait and the other is from *another* provider
with no trait constraints.
If the ``group_policy`` is set to ``none``, it allows multiple granular
requests to be served by one provider. Namely::
GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512,DISK_GB:500
&resources1=SRIOV_NET_VF:1&required1=HW_NIC_ACCEL_SSL
&resources2=SRIOV_NET_VF:1
&group_policy=none
would return two candidates.
1. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_1`` (``SRIOV_NET_VF:1``) + ``NIC1_2`` (``SRIOV_NET_VF:1``)
2. ``CN1`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``) + ``NIC1_1`` (``SRIOV_NET_VF:2``)
This is because ``NIC1_1`` satisfies both request 1 (with ``HW_NIC_ACCEL_SSL``
trait) and request 2 (with no trait constraints).
Note that if ``member_of`` is specified in granular requests, the API
doesn't assume that "an aggregate on a root provider spans the whole tree."
It just sees whether the specified aggregate is directly associated with the
resource provider when looking up the candidates.
Filtering by Tree
=================
If you want to filter the result by a specific provider tree, use the
`Filter Allocation Candidates by Provider Tree`_ feature with the ``in_tree``
query parameter. For example, let's say we have the following environment::
+-----------------------+ +-----------------------+
| Sharing Storage (SS1) | | Sharing Storage (SS2) |
| DISK_GB: 1000 | | DISK_GB: 1000 |
+-----------+-----------+ +-----------+-----------+
| |
+-----------------+----------------+
| Shared via an aggregate
+-----------------+----------------+
| |
+--------------|---------------+ +--------------|--------------+
| +------------+-------------+ | | +------------+------------+ |
| | Compute Node (CN1) | | | | Compute Node (CN2) | |
| | DISK_GB: 1000 | | | | DISK_GB: 1000 | |
| +-----+-------------+------+ | | +----+-------------+------+ |
| | nested | nested | | | nested | nested |
| +-----+------+ +----+------+ | | +----+------+ +----+------+ |
| | NUMA1_1 | | NUMA1_2 | | | | NUMA2_1 | | NUMA2_2 | |
| | VCPU: 4 | | VCPU: 4 | | | | VCPU: 4 | | VCPU: 4 | |
| +------------+ +-----------+ | | +-----------+ +-----------+ |
+------------------------------+ +-----------------------------+
The request::
GET /allocation_candidates?resources=VCPU:1,DISK_GB:50&in_tree=<CN1 uuid>
will filter out candidates by ``CN1`` and return 2 combinations of allocation
candidates.
1. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``DISK_GB``)
The specified tree can be a non-root provider. The request::
GET /allocation_candidates?resources=VCPU:1,DISK_GB:50&in_tree=<NUMA1_1 uuid>
will return the same result being aware of resource providers in the same tree
with ``NUMA1_1`` resource provider.
1. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``DISK_GB``)
.. note::
We don't exclude ``NUMA1_2`` in the case above. That kind of feature is
proposed separately and in progress. See the `Support subtree filter`_
specification for details.
The suffixed syntax ``in_tree<$S>`` (where ``$S`` is a number in microversions
``1.25-1.32`` and ``[a-zA-Z0-9_-]{1,64}`` from ``1.33``) is also supported
according to `Granular Resource Requests`_. This restricts providers satisfying
the suffixed granular request group to the tree of the specified provider.
For example, in the environment above, when you want to have ``VCPU`` from
``CN1`` and ``DISK_GB`` from wherever, the request may look like::
GET /allocation_candidates?resources=VCPU:1&in_tree=<CN1 uuid>
&resources1=DISK_GB:10
which will return the sharing providers as well as the local disk.
1. ``NUMA1_1`` (``VCPU``) + ``CN1`` (``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``CN1`` (``DISK_GB``)
3. ``NUMA1_1`` (``VCPU``) + ``SS1`` (``DISK_GB``)
4. ``NUMA1_2`` (``VCPU``) + ``SS1`` (``DISK_GB``)
5. ``NUMA1_1`` (``VCPU``) + ``SS2`` (``DISK_GB``)
6. ``NUMA1_2`` (``VCPU``) + ``SS2`` (``DISK_GB``)
This is because the unsuffixed ``in_tree`` is applied to only the unsuffixed
resource of ``VCPU``, and not applied to the suffixed resource, ``DISK_GB``.
When you want to have ``VCPU`` from wherever and ``DISK_GB`` from ``SS1``,
the request may look like::
GET /allocation_candidates?resources=VCPU:1
&resources1=DISK_GB:10&in_tree1=<SS1 uuid>
which will stick to the first sharing provider for ``DISK_GB``.
1. ``NUMA1_1`` (``VCPU``) + ``SS1`` (``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``SS1`` (``DISK_GB``)
3. ``NUMA2_1`` (``VCPU``) + ``SS1`` (``DISK_GB``)
4. ``NUMA2_2`` (``VCPU``) + ``SS1`` (``DISK_GB``)
When you want to have ``VCPU`` from ``CN1`` and ``DISK_GB`` from ``SS1``,
the request may look like::
GET /allocation_candidates?resources1=VCPU:1&in_tree1=<CN1 uuid>
&resources2=DISK_GB:10&in_tree2=<SS1 uuid>
&group_policy=isolate
which will return only 2 candidates.
1. ``NUMA1_1`` (``VCPU``) + ``SS1`` (``DISK_GB``)
2. ``NUMA1_2`` (``VCPU``) + ``SS1`` (``DISK_GB``)
.. _`filtering by root provider traits`:
Filtering by Root Provider Traits
=================================
When traits are associated with a particular resource, the provider tree should
be constructed such that the traits are associated with the provider possessing
the inventory of that resource. For example, trait ``HW_CPU_X86_AVX2`` is a
trait associated with the ``VCPU`` resource, so it should be placed on the
resource provider with ``VCPU`` inventory, wherever that provider is positioned
in the tree structure. (A NUMA-aware host may model ``VCPU`` inventory in a
child provider, whereas a non-NUMA-aware host may model it in the root
provider.)
On the other hand, some traits are associated not with a resource, but with the
provider itself. For example, a compute host may be capable of
``COMPUTE_VOLUME_MULTI_ATTACH``, or be associated with a
``CUSTOM_WINDOWS_LICENSE_POOL``. In this case it is recommended that the root
resource provider be used to represent the concept of the "compute host"; so
these kinds of traits should always be placed on the root resource provider.
The following environment illustrates the above concepts::
+---------------------------------+ +-------------------------------------------+
|+-------------------------------+| | +-------------------------------+ |
|| Compute Node (NON_NUMA_CN) || | | Compute Node (NUMA_CN) | |
|| VCPU: 8, || | | DISK_GB: 1000 | |
|| MEMORY_MB: 1024 || | | traits: | |
|| DISK_GB: 1000 || | | STORAGE_DISK_SSD, | |
|| traits: || | | COMPUTE_VOLUME_MULTI_ATTACH | |
|| HW_CPU_X86_AVX2, || | +-------+-------------+---------+ |
|| STORAGE_DISK_SSD, || | nested | | nested |
|| COMPUTE_VOLUME_MULTI_ATTACH, || |+-----------+-------+ +---+---------------+|
|| CUSTOM_WINDOWS_LICENSE_POOL || || NUMA1 | | NUMA2 ||
|+-------------------------------+| || VCPU: 4 | | VCPU: 4 ||
+---------------------------------+ || MEMORY_MB: 1024 | | MEMORY_MB: 1024 ||
|| | | traits: ||
|| | | HW_CPU_X86_AVX2 ||
|+-------------------+ +-------------------+|
+-------------------------------------------+
A tree modeled in this fashion can take advantage of the `root_required`_
query parameter to return only allocation candidates from trees which possess
(or do not possess) specific traits on their root provider. For example,
to return allocation candidates including ``VCPU`` with the ``HW_CPU_X86_AVX2``
instruction set from hosts capable of ``COMPUTE_VOLUME_MULTI_ATTACH``, a
request may look like::
GET /allocation_candidates
?resources1=VCPU:1,MEMORY_MB:512&required1=HW_CPU_X86_AVX2
&resources2=DISK_GB:100
&group_policy=none
&root_required=COMPUTE_VOLUME_MULTI_ATTACH
This will return results from both ``NUMA_CN`` and ``NON_NUMA_CN`` because
both have the ``COMPUTE_VOLUME_MULTI_ATTACH`` trait on the root provider; but
only ``NUMA2`` has ``HW_CPU_X86_AVX2`` so there will only be one result from
``NUMA_CN``.
1. ``NON_NUMA_CN`` (``VCPU``, ``MEMORY_MB``, ``DISK_GB``)
2. ``NUMA_CN`` (``DISK_GB``) + ``NUMA2`` (``VCPU``, ``MEMORY_MB``)
To restrict allocation candidates to only those not in your
``CUSTOM_WINDOWS_LICENSE_POOL``, a request may look like::
GET /allocation_candidates
?resources1=VCPU:1,MEMORY_MB:512
&resources2=DISK_GB:100
&group_policy=none
&root_required=!CUSTOM_WINDOWS_LICENSE_POOL
This will return results only from ``NUMA_CN`` because ``NON_NUMA_CN`` has the
forbidden ``CUSTOM_WINDOWS_LICENSE_POOL`` on the root provider.
1. ``NUMA_CN`` (``DISK_GB``) + ``NUMA1`` (``VCPU``, ``MEMORY_MB``)
2. ``NUMA_CN`` (``DISK_GB``) + ``NUMA2`` (``VCPU``, ``MEMORY_MB``)
The syntax of the ``root_required`` query parameter is identical to that of
``required[$S]``: multiple trait strings may be specified, separated by commas,
each optionally prefixed with ``!`` to indicate that it is forbidden.
.. note:: ``root_required`` may not be suffixed, and may be specified only
once, as it applies only to the root provider.
.. note:: When sharing providers are involved in the request, ``root_required``
applies only to the root of the non-sharing provider tree.
.. note:: While the ``required`` param supports the any-traits query with the
``in:`` prefix syntax since microversion 1.39 the ``root_required``
parameter does not support it yet.
Filtering by Same Subtree
=========================
If you want to express affinity among allocations in separate request groups,
use the `same_subtree`_ query parameter. It accepts a comma-separated list of
request group suffix strings ($S). Each must exactly match a suffix on a
granular group somewhere else in the request. If this is provided, at least one
of the resource providers satisfying a specified request group must be an
ancestor of the rest.
For example, given a model like::
+---------------------------+
| Compute Node (CN) |
+-------------+-------------+
|
+--------------------+-------------------+
| |
+-----------+-----------+ +-----------+-----------+
| NUMA NODE (NUMA0) | | NUMA NODE (NUMA1) |
| VCPU: 4 | | VCPU: 4 |
| MEMORY_MB: 2048 | | MEMORY_MB: 2048 |
| traits: | | traits: |
| HW_NUMA_ROOT | | HW_NUMA_ROOT |
+-----------+-----------+ +----+-------------+----+
| | |
+-----------+-----------+ +----------------+-----+ +-----+----------------+
| FPGA (FPGA0_0) | | FPGA (FPGA1_0) | | FPGA (FPGA1_1) |
| ACCELERATOR_FPGA:1 | | ACCELERATOR_FPGA:1 | | ACCELERATOR_FPGA:1 |
| traits: | | traits: | | traits: |
| CUSTOM_TYPE1 | | CUSTOM_TYPE1 | | CUSTOM_TYPE2 |
+-----------------------+ +----------------------+ +----------------------+
To request FPGAs on the same NUMA node with VCPUs and MEMORY, a request may
look like::
GET /allocation_candidates
?resources_COMPUTE=VCPU:1,MEMORY_MB:256
&resources_ACCEL=ACCELERATOR_FPGA:1
&group_policy=none
&same_subtree=_COMPUTE,_ACCEL
This will produce candidates including:
1. ``NUMA0`` (``VCPU``, ``MEMORY_MB``) + ``FPGA0_0`` (``ACCELERATOR_FPGA``)
2. ``NUMA1`` (``VCPU``, ``MEMORY_MB``) + ``FPGA1_0`` (``ACCELERATOR_FPGA``)
3. ``NUMA1`` (``VCPU``, ``MEMORY_MB``) + ``FPGA1_1`` (``ACCELERATOR_FPGA``)
but not:
4. ``NUMA0`` (``VCPU``, ``MEMORY_MB``) + ``FPGA1_0`` (``ACCELERATOR_FPGA``)
5. ``NUMA0`` (``VCPU``, ``MEMORY_MB``) + ``FPGA1_1`` (``ACCELERATOR_FPGA``)
6. ``NUMA1`` (``VCPU``, ``MEMORY_MB``) + ``FPGA0_0`` (``ACCELERATOR_FPGA``)
The request groups specified in the ``same_subtree`` need not have a
resources$S. For example, to request 2 FPGAs with different traits on the same
NUMA node, a request may look like::
GET /allocation_candidates
?required_NUMA=HW_NUMA_ROOT
&resources_ACCEL1=ACCELERATOR_FPGA:1
&required_ACCEL1=CUSTOM_TYPE1
&resources_ACCEL2=ACCELERATOR_FPGA:1
&required_ACCEL2=CUSTOM_TYPE2
&group_policy=none
&same_subtree=_NUMA,_ACCEL1,_ACCEL2
This will produce candidates including:
1. ``FPGA1_0`` (``ACCELERATOR_FPGA``) + ``FPGA1_1`` (``ACCELERATOR_FPGA``) + ``NUMA1``
but not:
2. ``FPGA0_0`` (``ACCELERATOR_FPGA``) + ``FPGA1_1`` (``ACCELERATOR_FPGA``) + ``NUMA0``
3. ``FPGA0_0`` (``ACCELERATOR_FPGA``) + ``FPGA1_1`` (``ACCELERATOR_FPGA``) + ``NUMA1``
4. ``FPGA1_0`` (``ACCELERATOR_FPGA``) + ``FPGA1_1`` (``ACCELERATOR_FPGA``) + ``NUMA0``
The resource provider that satisfies the resourceless request group
``?required_NUMA=HW_NUMA_ROOT``, ``NUMA1`` in the first example above, will
not be in the ``allocation_request`` field of the response, but is shown in
the ``mappings`` field.
The ``same_subtree`` query parameter can be repeated and each repeat group is
treated independently.
.. _`Nested Resource Providers`: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/nested-resource-providers.html
.. _`POST /resource_providers`: https://docs.openstack.org/api-ref/placement/#create-resource-provider
.. _`PUT /resource_providers/{uuid}`: https://docs.openstack.org/api-ref/placement/#update-resource-provider
.. _`GET /allocation_candidates`: https://docs.openstack.org/api-ref/placement/#list-allocation-candidates
.. _`Filtering by Aggregate Membership`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/alloc-candidates-member-of.html
.. _`The Traits API`: http://specs.openstack.org/openstack/nova-specs/specs/pike/implemented/resource-provider-traits.html
.. _`Request Traits`: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/request-traits-in-nova.html
.. _`Forbidden Traits`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/placement-forbidden-traits.html
.. _`Granular Resource Request`: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/granular-resource-requests.html
.. _`Filter Allocation Candidates by Provider Tree`: https://specs.openstack.org/openstack/nova-specs/specs/stein/implemented/alloc-candidates-in-tree.html
.. _`Support subtree filter`: https://review.opendev.org/#/c/595236/
.. _`root_required`: https://docs.openstack.org/placement/latest/specs/train/approved/2005575-nested-magic-1.html#root-required
.. _`same_subtree`: https://docs.openstack.org/placement/latest/specs/train/approved/2005575-nested-magic-1.html#same-subtree
placement-14.0.0+git20260116.35.cd24dcb5/doc/test/ 0000775 0000000 0000000 00000000000 15132464062 0020413 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/doc/test/redirect-tests.txt 0000664 0000000 0000000 00000001317 15132464062 0024117 0 ustar 00root root 0000000 0000000 /placement/latest/specs/train/approved/2005297-negative-aggregate-membership.html 301 /placement/latest/specs/train/implemented/2005297-negative-aggregate-membership.html
/placement/latest/specs/train/approved/placement-resource-provider-request-group-mapping-in-allocation-candidates.html 301 /placement/latest/specs/train/implemented/placement-resource-provider-request-group-mapping-in-allocation-candidates.html
/placement/latest/specs/train/approved/2005575-nested-magic-1.html 301 /placement/latest/specs/train/implemented/2005575-nested-magic-1.html
/placement/latest/usage/index.html 301 /placement/latest/user/index.html
/placement/latest/usage/provider-tree.html 301 /placement/latest/user/provider-tree.html
placement-14.0.0+git20260116.35.cd24dcb5/etc/ 0000775 0000000 0000000 00000000000 15132464062 0017442 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/etc/placement/ 0000775 0000000 0000000 00000000000 15132464062 0021412 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/etc/placement/README.rst 0000664 0000000 0000000 00000001430 15132464062 0023077 0 ustar 00root root 0000000 0000000 Sample policy and config files
==============================
This directory contains sample ``placement.conf`` and ``policy.yaml`` files.
Sample Config
-------------
To generate the sample ``placement.conf`` file, run the following command from
the top level of the placement directory::
tox -e genconfig
For a pre-generated example of the latest ``placement.conf``, see:
https://docs.openstack.org/placement/latest/configuration/sample-config.html
Sample Policy
-------------
To generate the sample ``policy.yaml`` file, run the following command from the
top level of the placement directory::
tox -e genpolicy
For a pre-generated example of the latest placement ``policy.yaml``, see:
https://docs.openstack.org/placement/latest/configuration/sample-policy.html
placement-14.0.0+git20260116.35.cd24dcb5/etc/placement/config-generator.conf 0000664 0000000 0000000 00000000675 15132464062 0025522 0 ustar 00root root 0000000 0000000 [DEFAULT]
output_file = etc/placement/placement.conf.sample
wrap_width = 80
namespace = placement.conf
namespace = keystonemiddleware.auth_token
namespace = oslo.log
namespace = oslo.middleware.cors
namespace = oslo.middleware.http_proxy_to_wsgi
namespace = oslo.policy
namespace = osprofiler
# FIXME(mriedem): There are likely other missing 3rd party oslo library
# options that should show up in the placement.conf docs, like oslo.concurrency
placement-14.0.0+git20260116.35.cd24dcb5/etc/placement/policy-generator.conf 0000664 0000000 0000000 00000000117 15132464062 0025543 0 ustar 00root root 0000000 0000000 [DEFAULT]
output_file = etc/placement/policy.yaml.sample
namespace = placement
placement-14.0.0+git20260116.35.cd24dcb5/gate/ 0000775 0000000 0000000 00000000000 15132464062 0017607 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/gate/README 0000664 0000000 0000000 00000001250 15132464062 0020465 0 ustar 00root root 0000000 0000000 This directory contains files used by the OpenStack infra test system. They are
really only relevant within the scope of the OpenStack infra system and are not
expected to be useful to anyone else.
These files are a mixture of:
* Hooks and other scripts to be used by the OpenStack infra test system. These
scripts may be called by certain jobs at important times to do extra testing,
setup, run services, etc.
* "gabbits" are test files to be used with some of the jobs described in
.zuul.yaml and playbooks. When changes are made in the gabbits or playbooks
it is quite likely that queries in the playbooks or the assertions in the
gabbits will need to be updated.
placement-14.0.0+git20260116.35.cd24dcb5/gate/gabbits/ 0000775 0000000 0000000 00000000000 15132464062 0021222 5 ustar 00root root 0000000 0000000 placement-14.0.0+git20260116.35.cd24dcb5/gate/gabbits/nested-perfload.yaml 0000664 0000000 0000000 00000013611 15132464062 0025164 0 ustar 00root root 0000000 0000000 # This is a nested topology to exercise a large section of the nested provider
# related code in placement. The structure here is based on some of the
# structures in the NUMANetworkFixture in
# placement.tests.functional.fixtures.gabbits. This version initially leaves
# out many of the resource providers created there, with the intent that we can
# add more as the need presents itself.
#
# For the time being only one compute node is created, with two numa nodes,
# each with two devices attached, either two FPGA or an FPGA and PGPU.
#
# Here's a graphical representation of what is created. Please keep this up to
# date as changes are made:
#
# +-----------------------------+
# | compute node (cn1) |
# | COMPUTE_VOLUME_MULTI_ATTACH |
# | DISK_GB: 20480 |
# +---------------+-------------+
# |
# +--------------------+
# | |
# +---------+--------+ +---------+--------+
# | numa0 | | numa1 |
# | HW_NUMA_ROOT | | HW_NUMA_ROOT |
# | | | CUSTOM_FOO |
# | VCPU: 4 (2 res.) | | VCPU: 4 |
# | MEMORY_MB: 2048 | | MEMORY_MB: 2048 |
# | min_unit: 512 | | min_unit: 256 |
# | step_size: 256 | | max_unit: 1024 |
# +---+----------+---+ +---+----------+---+
# | | | |
# +---+---+ +---+---+ +---+---+ +---+---+
# |fpga0 | |pgpu0 | |fpga1_0| |fpga1_1|
# |FPGA:1 | |VGPU:8 | |FPGA:1 | |FPGA:1 |
# +-------+ +-------+ +-------+ +-------+
defaults:
request_headers:
accept: application/json
content-type: application/json
openstack-api-version: placement latest
x-auth-token: $ENVIRON['TOKEN']
tests:
- name: create FOO trait
PUT: /traits/CUSTOM_FOO
status: 201 || 204
- name: create cn1
POST: /resource_providers
data:
uuid: $ENVIRON['CN1_UUID']
name: $ENVIRON['CN1_UUID']
status: 200
- name: set cn1 inventory
PUT: /resource_providers/$ENVIRON['CN1_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
DISK_GB:
total: 20480
- name: set compute node traits
PUT: /resource_providers/$ENVIRON['CN1_UUID']/traits
data:
resource_provider_generation: 1
traits:
- COMPUTE_VOLUME_MULTI_ATTACH
- name: create numa 0
POST: /resource_providers
data:
uuid: $ENVIRON['N0_UUID']
name: numa 0-$ENVIRON['N0_UUID']
parent_provider_uuid: $ENVIRON['CN1_UUID']
- name: set numa 0 inventory
PUT: /resource_providers/$ENVIRON['N0_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 4
reserved: 2
MEMORY_MB:
total: 2048
min_unit: 512
step_size: 256
- name: set numa 0 traits
PUT: /resource_providers/$ENVIRON['N0_UUID']/traits
data:
resource_provider_generation: 1
traits:
- HW_NUMA_ROOT
- name: create fpga0_0
POST: /resource_providers
data:
uuid: $ENVIRON['FPGA0_0_UUID']
name: fpga0-0-$ENVIRON['FPGA0_0_UUID']
parent_provider_uuid: $ENVIRON['N0_UUID']
- name: set fpga0_0 inventory
PUT: /resource_providers/$ENVIRON['FPGA0_0_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
FPGA:
total: 1
- name: create pgpu0_0
POST: /resource_providers
data:
uuid: $ENVIRON['PGPU0_0_UUID']
name: pgpu0-0-$ENVIRON['PGPU0_0_UUID']
parent_provider_uuid: $ENVIRON['N0_UUID']
- name: set pgpu0_0 inventory
PUT: /resource_providers/$ENVIRON['PGPU0_0_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
VGPU:
total: 8
- name: create numa 1
POST: /resource_providers
data:
uuid: $ENVIRON['N1_UUID']
name: numa 1-$ENVIRON['N1_UUID']
parent_provider_uuid: $ENVIRON['CN1_UUID']
- name: set numa 1 inventory
PUT: /resource_providers/$ENVIRON['N1_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
VCPU:
total: 4
MEMORY_MB:
total: 2048
min_unit: 256
max_unit: 1024
- name: set numa 1 traits
PUT: /resource_providers/$ENVIRON['N1_UUID']/traits
data:
resource_provider_generation: 1
traits:
- HW_NUMA_ROOT
- CUSTOM_FOO
- name: create fpga1_0
POST: /resource_providers
data:
uuid: $ENVIRON['FPGA1_0_UUID']
name: fpga1-0-$ENVIRON['FPGA1_0_UUID']
parent_provider_uuid: $ENVIRON['N1_UUID']
- name: set fpga1_0 inventory
PUT: /resource_providers/$ENVIRON['FPGA1_0_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
FPGA:
total: 1
- name: create fpga1_1
POST: /resource_providers
data:
uuid: $ENVIRON['FPGA1_1_UUID']
name: fpga1-1-$ENVIRON['FPGA1_1_UUID']
parent_provider_uuid: $ENVIRON['N1_UUID']
- name: set fpga1_1 inventory
PUT: /resource_providers/$ENVIRON['FPGA1_1_UUID']/inventories
data:
resource_provider_generation: 0
inventories:
FPGA:
total: 1
placement-14.0.0+git20260116.35.cd24dcb5/gate/perfload-nested-loader.sh 0000775 0000000 0000000 00000001323 15132464062 0024465 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -a
HOST=$1
GABBIT=$2
# By default the placement server is set up with noauth2 authentication
# handling. If that is changed to keystone, a $TOKEN can be generated in
# the calling environment and used instead of the default 'admin'.
TOKEN=${TOKEN:-admin}
# These are the dynamic/unique values for individual resource providers
# that need to be set for each run a gabbi file. Values that are the same
# for all the resource providers (for example, traits and inventory) should
# be set in $GABBIT.
CN1_UUID=$(uuidgen)
N0_UUID=$(uuidgen)
N1_UUID=$(uuidgen)
FPGA0_0_UUID=$(uuidgen)
FPGA1_0_UUID=$(uuidgen)
FPGA1_1_UUID=$(uuidgen)
PGPU0_0_UUID=$(uuidgen)
# Run gabbi silently.
gabbi-run -q $HOST -- $GABBIT
placement-14.0.0+git20260116.35.cd24dcb5/gate/perfload-nested-runner.sh 0000775 0000000 0000000 00000011011 15132464062 0024523 0 ustar 00root root 0000000 0000000 #!/bin/bash -x
WORK_DIR=$1
PLACEMENT_URL="http://127.0.0.1:8000"
LOG=placement-perf.txt
LOG_DEST=${WORK_DIR}/logs
# The gabbit used to create one nested provider tree. It takes
# inputs from LOADER to create a unique tree.
GABBIT=gate/gabbits/nested-perfload.yaml
LOADER=gate/perfload-nested-loader.sh
# The query to be used to get a list of allocation candidates. If
# $GABBIT is changed, this may need to change.
TRAIT="COMPUTE_VOLUME_MULTI_ATTACH"
TRAIT1="CUSTOM_FOO"
PLACEMENT_QUERY="resources=DISK_GB:10&required=${TRAIT}&resources_COMPUTE=VCPU:1,MEMORY_MB:256&required_COMPUTE=${TRAIT1}&resources_FPGA=FPGA:1&group_policy=none&same_subtree=_COMPUTE,_FPGA"
# Number of nested trees to create.
ITERATIONS=1000
# Number of times to write allocations and then time again.
ALLOCATIONS_TO_WRITE=10
# Apache Benchmark Concurrency
AB_CONCURRENT=10
# Apache Benchmark Total Requests
AB_COUNT=500
# The number of providers in each nested tree. This will need to
# change whenever the resource provider topology created in $GABBIT
# is changed.
PROVIDER_TOPOLOGY_COUNT=7
# Expected total number of providers, used to check that creation
# was a success.
TOTAL_PROVIDER_COUNT=$((ITERATIONS * PROVIDER_TOPOLOGY_COUNT))
trap "sudo cp -p $LOG $LOG_DEST" EXIT
function time_candidates {
(
echo "##### TIMING GET /allocation_candidates?${PLACEMENT_QUERY} twice"
time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
) 2>&1 | tee -a $LOG
}
# Run apache benchmark ($AB_COUNT requests, $AB_CONCURRENT concurrent)
# against the allocation candidates query, appending output to $LOG.
function ab_bench {
    local token_header='x-auth-token: admin'
    local version_header='openstack-api-version: placement latest'
    (
        echo "#### Running apache benchmark"
        ab -c $AB_CONCURRENT -n $AB_COUNT -H "$token_header" -H "$version_header" "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}"
    ) 2>&1 | tee -a $LOG
}
# Fetch allocation candidates, transform the first candidate into a
# complete allocation request body via jq, and PUT it back for a brand
# new (uuidgen) consumer. Exits the whole script with status 1 if the
# PUT fails.
function write_allocation {
    # Take the first allocation request and send it back as a well-formed allocation
    curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}&limit=5" \
    | jq --arg proj $(uuidgen) --arg user $(uuidgen) '.allocation_requests[0] + {consumer_generation: null, project_id: $proj, user_id: $user, consumer_type: "TEST"}' \
    | curl -f -s -S -H 'x-auth-token: admin' -H 'content-type: application/json' -H 'openstack-api-version: placement latest' \
    -X PUT -d @- "${PLACEMENT_URL}/allocations/$(uuidgen)"
    # curl -f will fail silently on server errors and return code 22
    # When used with -s, --silent, -S makes curl show an error message if it fails
    # If we failed to write an allocation, skip measurements and log a message
    # NOTE(review): $? here still holds the exit status of the final
    # curl in the pipeline; the intervening comment lines above are not
    # commands and do not reset it.
    rc=$?
    if [[ $rc -eq 22 ]]; then
    echo "Failed to write allocation due to a server error. See logs/placement-api.log for additional detail."
    exit 1
    elif [[ $rc -ne 0 ]]; then
    echo "Failed to write allocation, curl returned code: $rc. See job-output.txt for additional detail."
    exit 1
    fi
}
# Alternate writing allocations with timing the allocation candidates
# query: one initial timing pass, then $ALLOCATIONS_TO_WRITE rounds of
# (write allocation, time candidates) to show how latency shifts as
# consumers accumulate.
function load_candidates {
    time_candidates
    local round=1
    while [[ $round -le $ALLOCATIONS_TO_WRITE ]]; do
        echo "##### Writing allocation ${round}" | tee -a $LOG
        write_allocation
        time_candidates
        round=$((round + 1))
    done
}
# Create the nested resource provider trees, verify the expected
# number of providers exists, then run the timing measurements
# (load_candidates + ab_bench). Exits non-zero if provider creation
# fell short.
function check_placement {
    local rp_count
    local code
    code=0

    # Build an isolated venv so gabbi does not pollute the system python.
    python3 -m venv .perfload
    . .perfload/bin/activate
    # install gabbi
    pip install gabbi

    # Create $ITERATIONS nested resource provider trees, each tree
    # having $PROVIDER_TOPOLOGY_COUNT resource providers, for a total
    # of $TOTAL_PROVIDER_COUNT providers. LOADER is called $ITERATIONS
    # times in parallel using 50% of the number of processors on the
    # host.
    echo "##### Creating $TOTAL_PROVIDER_COUNT providers" | tee -a $LOG
    seq 1 $ITERATIONS | parallel -P 50% $LOADER $PLACEMENT_URL $GABBIT

    # Quiet the xtrace noise while counting and measuring.
    set +x
    # Count created providers by grepping '"name"' keys out of the
    # resource_providers listing; -s keeps curl's progress meter out
    # of the captured output.
    rp_count=$(curl -s -H 'x-auth-token: admin' ${PLACEMENT_URL}/resource_providers |json_pp|grep -c '"name"')
    # If we failed to create the required number of rps, skip measurements and
    # log a message.
    if [[ $rp_count -ge $TOTAL_PROVIDER_COUNT ]]; then
        load_candidates
        ab_bench
    else
        (
            # NOTE: previously this interpolated ${COUNT}, which is not
            # defined in this script; the expected value is
            # ${TOTAL_PROVIDER_COUNT}.
            echo "Unable to create expected number of resource providers. Expected: ${TOTAL_PROVIDER_COUNT}, Got: $rp_count"
            echo "See job-output.txt.gz and logs/placement-api.log for additional detail."
        ) | tee -a $LOG
        code=1
    fi
    set -x
    deactivate
    exit $code
}
check_placement
placement-14.0.0+git20260116.35.cd24dcb5/gate/perfload-runner.sh 0000775 0000000 0000000 00000012523 15132464062 0023254 0 ustar 00root root 0000000 0000000 #!/bin/bash -x
# Performance-test driver configuration for the perfload job.
# Positional arg 1 is the job working directory.
WORK_DIR=$1
# Do some performance related information gathering for placement.
# EXPLANATION is echoed verbatim into the log to document how to read
# the placeload output that follows it.
EXPLANATION="
This output combines output from placeload with timing information
gathered via curl. The placeload output is the current maximum
microversion of placement followed by an encoded representation of
what it has done. Lowercase 'r', 'i', 'a', and 't' indicate successful
creation of a resource provider and setting inventory, aggregates, and
traits on that resource provider.
If there are upper case versions of any of those letters, a failure
happened for a single request. The letter will be followed by the
HTTP status code and the resource provider uuid. These can be used
to find the relevant entry in logs/placement-api.log.
Note that placeload does not exit with an error code when this
happens. It merely reports and moves on. Under correct circumstances
the right output is a long string of 4000 characters containing
'r', 'i', 'a', 't' in random order (because async).
After that are three aggregate uuids, timing information for the
placeload run, and then timing information for two identical curl
requests for allocation candidates.
If no timed requests are present it means that the expected number
of resource providers were not created. At this time, only resource
providers are counted, not whether they have the correct inventory,
aggregates, or traits.
"
# This aggregate uuid is a static value in placeload.
AGGREGATE="14a5c8a3-5a99-4e8f-88be-00d85fcb1c17"
TRAIT="HW_CPU_X86_AVX2"
# The allocation candidates query used for every timed request below.
PLACEMENT_QUERY="resources=VCPU:1,DISK_GB:10,MEMORY_MB:256&member_of=${AGGREGATE}&required=${TRAIT}"
# Placement API endpoint under test.
PLACEMENT_URL="http://127.0.0.1:8000"
# Local file where all timing output is accumulated.
LOG=placement-perf.txt
# Where the log is copied when the script exits (see trap below).
LOG_DEST=${WORK_DIR}/logs
# Number of resource providers placeload creates.
COUNT=1000
# Apache Benchmark Concurrency
AB_CONCURRENT=10
# Apache Benchmark Total Requests
AB_COUNT=500
# Always preserve the perf log in the job's log directory, even on
# failure. NOTE(review): $LOG and $LOG_DEST are expanded now, at
# trap-definition time, which is fine because both are already set.
trap "sudo cp -p $LOG $LOG_DEST" EXIT
# Time two back-to-back GETs of the allocation candidates query; the
# first request warms caches, the second shows steady-state latency.
# All output (including the shell's `time` report) is appended to $LOG.
function time_candidates {
    (
        echo "##### TIMING GET /allocation_candidates?${PLACEMENT_QUERY} twice"
        local run
        for run in 1 2; do
            time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
        done
    ) 2>&1 | tee -a $LOG
}
# Run apache benchmark ($AB_COUNT requests, $AB_CONCURRENT concurrent)
# against the allocation candidates query, appending output to $LOG.
function ab_bench {
    local token_header='x-auth-token: admin'
    local version_header='openstack-api-version: placement latest'
    (
        echo "#### Running apache benchmark"
        ab -c $AB_CONCURRENT -n $AB_COUNT -H "$token_header" -H "$version_header" "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}"
    ) 2>&1 | tee -a $LOG
}
# Fetch allocation candidates, transform the first candidate into a
# complete allocation request body via jq, and PUT it back for a brand
# new (uuidgen) consumer. Exits the whole script with status 1 if the
# PUT fails.
function write_allocation {
    # Take the first allocation request and send it back as a well-formed allocation
    curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}&limit=5" \
    | jq --arg proj $(uuidgen) --arg user $(uuidgen) '.allocation_requests[0] + {consumer_generation: null, project_id: $proj, user_id: $user, consumer_type: "TEST"}' \
    | curl -f -s -S -H 'x-auth-token: admin' -H 'content-type: application/json' -H 'openstack-api-version: placement latest' \
    -X PUT -d @- "${PLACEMENT_URL}/allocations/$(uuidgen)"
    rc=$?
    # curl -f will fail silently on server errors and return code 22
    # When used with -s, --silent, -S makes curl show an error message if it fails
    # If we failed to write an allocation, skip measurements and log a message
    case $rc in
        0)
            ;;
        22)
            echo "Failed to write allocation due to a server error. See logs/placement-api.log for additional detail."
            exit 1
            ;;
        *)
            echo "Failed to write allocation, curl returned code: $rc. See job-output.txt for additional detail."
            exit 1
            ;;
    esac
}
# Alternate writing allocations with timing the allocation candidates
# query: one initial timing pass, then repeated rounds of
# (write allocation, time candidates) to show how latency shifts as
# consumers accumulate.
#
# The number of rounds keeps its historical default of 99 but may now
# be overridden via ALLOCATIONS_TO_WRITE, matching the knob of the
# same name in perfload-nested-runner.sh.
function load_candidates {
    time_candidates
    for iter in $(seq 1 ${ALLOCATIONS_TO_WRITE:-99}); do
        echo "##### Writing allocation ${iter}" | tee -a $LOG
        write_allocation
        time_candidates
    done
}
# Create $COUNT resource providers with placeload, confirm they all
# exist, then run the timing measurements (load_candidates +
# ab_bench). Exits non-zero if provider creation fell short.
function check_placement {
local rp_count
local code
code=0
# Build an isolated venv so placeload does not pollute the system python.
python3 -m venv .placeload
. .placeload/bin/activate
# Pre-release version is needed for Python 3.10, 3.11, 3.12 support
# (this lib is pulled in by placeload).
# See https://github.com/PyYoshi/cChardet/issues/81 for details
# This can be removed when the latest normal version has support.
pip install 'cchardet>=2.2.0a2'
# install placeload
pip install 'placeload==0.3.0'
# Quiet the xtrace noise while loading and measuring.
set +x
# load with placeload
(
echo "$EXPLANATION"
# preheat the aggregates to avoid https://bugs.launchpad.net/nova/+bug/1804453
placeload $PLACEMENT_URL 10
echo "##### TIMING placeload creating $COUNT resource providers with inventory, aggregates and traits."
time placeload $PLACEMENT_URL $COUNT
) 2>&1 | tee -a $LOG
# Count created providers by grepping '"name"' keys out of the
# resource_providers listing.
rp_count=$(curl -H 'x-auth-token: admin' ${PLACEMENT_URL}/resource_providers |json_pp|grep -c '"name"')
# If we failed to create the required number of rps, skip measurements and
# log a message.
if [[ $rp_count -ge $COUNT ]]; then
load_candidates
ab_bench
else
(
echo "Unable to create expected number of resource providers. Expected: ${COUNT}, Got: $rp_count"
echo "See job-output.txt.gz and logs/placement-api.log for additional detail."
) | tee -a $LOG
code=1
fi
# Restore xtrace before leaving the venv and reporting the result.
set -x
deactivate
exit $code
}
check_placement
placement-14.0.0+git20260116.35.cd24dcb5/gate/perfload-server.sh 0000775 0000000 0000000 00000002510 15132464062 0023244 0 ustar 00root root 0000000 0000000 #!/bin/bash -x
WORK_DIR=$1
# create database
sudo debconf-set-selections <